/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helpers.h"
}

static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* Normally, since we updated PC, we need only add one insn.  */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

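/* Write var to the CPSR; only the bits selected by mask are updated.
   The caller remains responsible for freeing var.  */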
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

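/* Generate code that raises exception number excp.  */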
static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

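/* Dual 16x16->32 signed multiply: a returns the product of the low
   halfwords, b the product of the high halfwords.  */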
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Round the top 32 bits of a 64-bit value.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
    tcg_temp_free_i64(tmp1);
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  The result is placed in t0; t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}

/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

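/* Set CF to bit "shift" of var, i.e. the last bit shifted out.  */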
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
            break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}

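/* Shift by register.  Uses the flag-setting helper variants when the
   condition codes are needed.  shift is marked as dead.  */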
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

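/* Branch to label if the ARM condition cc is satisfied, testing the
   cached flag values in the CPU state.  */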
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}

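/* For each data processing opcode: nonzero if it is a logical operation
   that sets N and Z directly from the result.  */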
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        dead_tmp(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a temporary
   and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

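/* Memory access helpers.  Loads allocate and return a new temporary;
   stores mark val as dead.  index is the memory index for the access.  */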
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

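/* Add the ARM addressing mode 2 offset (12-bit immediate or shifted
   register) encoded in insn to var, adding or subtracting according to
   the U bit.  */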
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    if (dp)                                                           \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else                                                              \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env); \
    tcg_temp_free_i32(tmp_shift); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

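/* Return the byte offset of VFP register reg within CPUARMState.
   dp selects the double-precision register file.  */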
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT        (1 << 20)

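/* iwMMXt data register (wRn) and control register (wCn) accessors.  */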
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = new_tmp();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    dead_tmp(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

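/* Record an update in wCon: bit 1 (MUP) after a data register write,
   bit 0 (CUP) after a control register write.  */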
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

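/* Compute the effective address of an iwMMXt load/store into dest,
   handling pre/post indexing and base register writeback.  Returns
   nonzero for an unsupported addressing mode.  */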
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

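/* Load the shift amount for an iwMMXt shift instruction into dest, taken
   either from a wCGRn control register or from the low half of wRn, and
   masked with mask.  Returns nonzero for an invalid encoding.  */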
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = new_tmp();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    dead_tmp(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1336
{
1337
    int rd, wrd;
1338
    int rdhi, rdlo, rd0, rd1, i;
1339
    TCGv addr;
1340
    TCGv tmp, tmp2, tmp3;
1341

    
1342
    if ((insn & 0x0e000e00) == 0x0c000000) {
1343
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
1344
            wrd = insn & 0xf;
1345
            rdlo = (insn >> 12) & 0xf;
1346
            rdhi = (insn >> 16) & 0xf;
1347
            if (insn & ARM_CP_RW_BIT) {                        /* TMRRC */
1348
                iwmmxt_load_reg(cpu_V0, wrd);
1349
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1350
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1351
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1352
            } else {                                        /* TMCRR */
1353
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1354
                iwmmxt_store_reg(cpu_V0, wrd);
1355
                gen_op_iwmmxt_set_mup();
1356
            }
1357
            return 0;
1358
        }
1359

    
1360
        wrd = (insn >> 12) & 0xf;
1361
        addr = new_tmp();
1362
        if (gen_iwmmxt_address(s, insn, addr)) {
1363
            dead_tmp(addr);
1364
            return 1;
1365
        }
1366
        if (insn & ARM_CP_RW_BIT) {
1367
            if ((insn >> 28) == 0xf) {                        /* WLDRW wCx */
1368
                tmp = new_tmp();
1369
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1370
                iwmmxt_store_creg(wrd, tmp);
1371
            } else {
1372
                i = 1;
1373
                if (insn & (1 << 8)) {
1374
                    if (insn & (1 << 22)) {                /* WLDRD */
1375
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1376
                        i = 0;
1377
                    } else {                                /* WLDRW wRd */
1378
                        tmp = gen_ld32(addr, IS_USER(s));
1379
                    }
1380
                } else {
1381
                    if (insn & (1 << 22)) {                /* WLDRH */
1382
                        tmp = gen_ld16u(addr, IS_USER(s));
1383
                    } else {                                /* WLDRB */
1384
                        tmp = gen_ld8u(addr, IS_USER(s));
1385
                    }
1386
                }
1387
                if (i) {
1388
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
1389
                    dead_tmp(tmp);
1390
                }
1391
                gen_op_iwmmxt_movq_wRn_M0(wrd);
1392
            }
1393
        } else {
1394
            if ((insn >> 28) == 0xf) {                        /* WSTRW wCx */
1395
                tmp = iwmmxt_load_creg(wrd);
1396
                gen_st32(tmp, addr, IS_USER(s));
1397
            } else {
1398
                gen_op_iwmmxt_movq_M0_wRn(wrd);
1399
                tmp = new_tmp();
1400
                if (insn & (1 << 8)) {
1401
                    if (insn & (1 << 22)) {                /* WSTRD */
1402
                        dead_tmp(tmp);
1403
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1404
                    } else {                                /* WSTRW wRd */
1405
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1406
                        gen_st32(tmp, addr, IS_USER(s));
1407
                    }
1408
                } else {
1409
                    if (insn & (1 << 22)) {                /* WSTRH */
1410
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1411
                        gen_st16(tmp, addr, IS_USER(s));
1412
                    } else {                                /* WSTRB */
1413
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1414
                        gen_st8(tmp, addr, IS_USER(s));
1415
                    }
1416
                }
1417
            }
1418
        }
1419
        dead_tmp(addr);
1420
        return 0;
1421
    }
1422

    
1423
    if ((insn & 0x0f000000) != 0x0e000000)
1424
        return 1;
1425

    
1426
    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1427
    case 0x000:                                                /* WOR */
1428
        wrd = (insn >> 12) & 0xf;
1429
        rd0 = (insn >> 0) & 0xf;
1430
        rd1 = (insn >> 16) & 0xf;
1431
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1432
        gen_op_iwmmxt_orq_M0_wRn(rd1);
1433
        gen_op_iwmmxt_setpsr_nz();
1434
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1435
        gen_op_iwmmxt_set_mup();
1436
        gen_op_iwmmxt_set_cup();
1437
        break;
1438
    case 0x011:                                                /* TMCR */
1439
        if (insn & 0xf)
1440
            return 1;
1441
        rd = (insn >> 12) & 0xf;
1442
        wrd = (insn >> 16) & 0xf;
1443
        switch (wrd) {
1444
        case ARM_IWMMXT_wCID:
1445
        case ARM_IWMMXT_wCASF:
1446
            break;
1447
        case ARM_IWMMXT_wCon:
1448
            gen_op_iwmmxt_set_cup();
1449
            /* Fall through.  */
1450
        case ARM_IWMMXT_wCSSF:
1451
            tmp = iwmmxt_load_creg(wrd);
1452
            tmp2 = load_reg(s, rd);
1453
            tcg_gen_andc_i32(tmp, tmp, tmp2);
1454
            dead_tmp(tmp2);
1455
            iwmmxt_store_creg(wrd, tmp);
1456
            break;
1457
        case ARM_IWMMXT_wCGR0:
1458
        case ARM_IWMMXT_wCGR1:
1459
        case ARM_IWMMXT_wCGR2:
1460
        case ARM_IWMMXT_wCGR3:
1461
            gen_op_iwmmxt_set_cup();
1462
            tmp = load_reg(s, rd);
1463
            iwmmxt_store_creg(wrd, tmp);
1464
            break;
1465
        default:
1466
            return 1;
1467
        }
1468
        break;
1469
    case 0x100:                                                /* WXOR */
1470
        wrd = (insn >> 12) & 0xf;
1471
        rd0 = (insn >> 0) & 0xf;
1472
        rd1 = (insn >> 16) & 0xf;
1473
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1474
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
1475
        gen_op_iwmmxt_setpsr_nz();
1476
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1477
        gen_op_iwmmxt_set_mup();
1478
        gen_op_iwmmxt_set_cup();
1479
        break;
1480
    case 0x111:                                                /* TMRC */
1481
        if (insn & 0xf)
1482
            return 1;
1483
        rd = (insn >> 12) & 0xf;
1484
        wrd = (insn >> 16) & 0xf;
1485
        tmp = iwmmxt_load_creg(wrd);
1486
        store_reg(s, rd, tmp);
1487
        break;
1488
    case 0x300:                                                /* WANDN */
1489
        wrd = (insn >> 12) & 0xf;
1490
        rd0 = (insn >> 0) & 0xf;
1491
        rd1 = (insn >> 16) & 0xf;
1492
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1493
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
1494
        gen_op_iwmmxt_andq_M0_wRn(rd1);
1495
        gen_op_iwmmxt_setpsr_nz();
1496
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1497
        gen_op_iwmmxt_set_mup();
1498
        gen_op_iwmmxt_set_cup();
1499
        break;
1500
    case 0x200:                                                /* WAND */
1501
        wrd = (insn >> 12) & 0xf;
1502
        rd0 = (insn >> 0) & 0xf;
1503
        rd1 = (insn >> 16) & 0xf;
1504
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1505
        gen_op_iwmmxt_andq_M0_wRn(rd1);
1506
        gen_op_iwmmxt_setpsr_nz();
1507
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1508
        gen_op_iwmmxt_set_mup();
1509
        gen_op_iwmmxt_set_cup();
1510
        break;
1511
    case 0x810: case 0xa10:                                /* WMADD */
1512
        wrd = (insn >> 12) & 0xf;
1513
        rd0 = (insn >> 0) & 0xf;
1514
        rd1 = (insn >> 16) & 0xf;
1515
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1516
        if (insn & (1 << 21))
1517
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1518
        else
1519
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
1520
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1521
        gen_op_iwmmxt_set_mup();
1522
        break;
1523
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:        /* WUNPCKIL */
1524
        wrd = (insn >> 12) & 0xf;
1525
        rd0 = (insn >> 16) & 0xf;
1526
        rd1 = (insn >> 0) & 0xf;
1527
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1528
        switch ((insn >> 22) & 3) {
1529
        case 0:
1530
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1531
            break;
1532
        case 1:
1533
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1534
            break;
1535
        case 2:
1536
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1537
            break;
1538
        case 3:
1539
            return 1;
1540
        }
1541
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1542
        gen_op_iwmmxt_set_mup();
1543
        gen_op_iwmmxt_set_cup();
1544
        break;
1545
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:        /* WUNPCKIH */
1546
        wrd = (insn >> 12) & 0xf;
1547
        rd0 = (insn >> 16) & 0xf;
1548
        rd1 = (insn >> 0) & 0xf;
1549
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1550
        switch ((insn >> 22) & 3) {
1551
        case 0:
1552
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1553
            break;
1554
        case 1:
1555
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1556
            break;
1557
        case 2:
1558
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1559
            break;
1560
        case 3:
1561
            return 1;
1562
        }
1563
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1564
        gen_op_iwmmxt_set_mup();
1565
        gen_op_iwmmxt_set_cup();
1566
        break;
1567
    case 0x012: case 0x112: case 0x412: case 0x512:        /* WSAD */
1568
        wrd = (insn >> 12) & 0xf;
1569
        rd0 = (insn >> 16) & 0xf;
1570
        rd1 = (insn >> 0) & 0xf;
1571
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1572
        if (insn & (1 << 22))
1573
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
1574
        else
1575
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
1576
        if (!(insn & (1 << 20)))
1577
            gen_op_iwmmxt_addl_M0_wRn(wrd);
1578
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1579
        gen_op_iwmmxt_set_mup();
1580
        break;
1581
    case 0x010: case 0x110: case 0x210: case 0x310:        /* WMUL */
1582
        wrd = (insn >> 12) & 0xf;
1583
        rd0 = (insn >> 16) & 0xf;
1584
        rd1 = (insn >> 0) & 0xf;
1585
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1586
        if (insn & (1 << 21)) {
1587
            if (insn & (1 << 20))
1588
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1589
            else
1590
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1591
        } else {
1592
            if (insn & (1 << 20))
1593
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1594
            else
1595
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
1596
        }
1597
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1598
        gen_op_iwmmxt_set_mup();
1599
        break;
1600
    case 0x410: case 0x510: case 0x610: case 0x710:        /* WMAC */
1601
        wrd = (insn >> 12) & 0xf;
1602
        rd0 = (insn >> 16) & 0xf;
1603
        rd1 = (insn >> 0) & 0xf;
1604
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1605
        if (insn & (1 << 21))
1606
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
1607
        else
1608
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
1609
        if (!(insn & (1 << 20))) {
1610
            iwmmxt_load_reg(cpu_V1, wrd);
1611
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1612
        }
1613
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1614
        gen_op_iwmmxt_set_mup();
1615
        break;
1616
    case 0x006: case 0x406: case 0x806: case 0xc06:        /* WCMPEQ */
1617
        wrd = (insn >> 12) & 0xf;
1618
        rd0 = (insn >> 16) & 0xf;
1619
        rd1 = (insn >> 0) & 0xf;
1620
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1621
        switch ((insn >> 22) & 3) {
1622
        case 0:
1623
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1624
            break;
1625
        case 1:
1626
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1627
            break;
1628
        case 2:
1629
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1630
            break;
1631
        case 3:
1632
            return 1;
1633
        }
1634
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1635
        gen_op_iwmmxt_set_mup();
1636
        gen_op_iwmmxt_set_cup();
1637
        break;
1638
    case 0x800: case 0x900: case 0xc00: case 0xd00:        /* WAVG2 */
1639
        wrd = (insn >> 12) & 0xf;
1640
        rd0 = (insn >> 16) & 0xf;
1641
        rd1 = (insn >> 0) & 0xf;
1642
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1643
        if (insn & (1 << 22)) {
1644
            if (insn & (1 << 20))
1645
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1646
            else
1647
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1648
        } else {
1649
            if (insn & (1 << 20))
1650
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1651
            else
1652
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1653
        }
1654
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1655
        gen_op_iwmmxt_set_mup();
1656
        gen_op_iwmmxt_set_cup();
1657
        break;
1658
    case 0x802: case 0x902: case 0xa02: case 0xb02:        /* WALIGNR */
1659
        wrd = (insn >> 12) & 0xf;
1660
        rd0 = (insn >> 16) & 0xf;
1661
        rd1 = (insn >> 0) & 0xf;
1662
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1663
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1664
        tcg_gen_andi_i32(tmp, tmp, 7);
1665
        iwmmxt_load_reg(cpu_V1, rd1);
1666
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1667
        dead_tmp(tmp);
1668
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1669
        gen_op_iwmmxt_set_mup();
1670
        break;
1671
    case 0x601: case 0x605: case 0x609: case 0x60d:        /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:        /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:        /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        dead_tmp(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:        /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
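    /* TANDC: AND together the per-lane flag fields held in wCASF by
       folding the register onto shifted copies of itself, then move
       the combined nibble into the CPSR flags.  */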
    case 0x113: case 0x513: case 0x913: case 0xd13:        /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:        /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:        /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:        /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:        /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:        /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:        /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:        /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:        /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:        /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:        /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:        /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:        /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:        /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:        /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:        /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:        /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:        /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
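    /* TMIA/TMIAPH/TMIAxy: signed multiply-accumulate of two ARM
       registers into the 64-bit wRd; for TMIAxy the selected 16-bit
       halves are shifted down before the multiply.  */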
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                        /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                        /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:                /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            dead_tmp(tmp2);
            dead_tmp(tmp);
            return 1;
        }
        dead_tmp(tmp2);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}

/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                        /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                        /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                        /* MIABB */
        case 0xd:                                        /* MIABT */
        case 0xe:                                        /* MIATB */
        case 0xf:                                        /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        dead_tmp(tmp2);
        dead_tmp(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                        /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
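            /* The XScale internal accumulator is 40 bits wide; only
               bits [39:32] are significant in rdhi, so mask the rest.  */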
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}

2408
   instruction is not defined.  */
2409
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2410
{
2411
    TCGv tmp, tmp2;
2412
    uint32_t rd = (insn >> 12) & 0xf;
2413
    uint32_t cp = (insn >> 8) & 0xf;
2414
    if (IS_USER(s)) {
2415
        return 1;
2416
    }
2417

    
2418
    if (insn & ARM_CP_RW_BIT) {
2419
        if (!env->cp[cp].cp_read)
2420
            return 1;
2421
        gen_set_pc_im(s->pc);
2422
        tmp = new_tmp();
2423
        tmp2 = tcg_const_i32(insn);
2424
        gen_helper_get_cp(tmp, cpu_env, tmp2);
2425
        tcg_temp_free(tmp2);
2426
        store_reg(s, rd, tmp);
2427
    } else {
2428
        if (!env->cp[cp].cp_write)
2429
            return 1;
2430
        gen_set_pc_im(s->pc);
2431
        tmp = load_reg(s, rd);
2432
        tmp2 = tcg_const_i32(insn);
2433
        gen_helper_set_cp(cpu_env, tmp2, tmp);
2434
        tcg_temp_free(tmp2);
2435
        dead_tmp(tmp);
2436
    }
2437
    return 0;
2438
}
2439

    
2440
static int cp15_user_ok(uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

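    /* op packs opc2 (insn[7:5]) into bits [2:0] and opc1 (insn[23:21])
       into bits [5:3], giving a single comparable opcode value.  */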
    if (cpn == 13 && cpm == 0) {
        /* TLS register.  */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    if (cpn == 7) {
        /* ISB, DSB, DMB.  */
        if ((cpm == 5 && op == 4)
                || (cpm == 10 && (op == 4 || op == 5)))
            return 1;
    }
    return 0;
}

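/* Handle the cp15 c13 TLS registers: ops 2/3/4 map to c13_tls1/2/3
   (TPIDRURW, TPIDRURO and TPIDRPRW respectively, V6K and later).
   Returns nonzero when the access has been handled.  */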
static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
{
    TCGv tmp;
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (!arm_feature(env, ARM_FEATURE_V6K))
        return 0;

    if (!(cpn == 13 && cpm == 0))
        return 0;

    if (insn & ARM_CP_RW_BIT) {
        switch (op) {
        case 2:
            tmp = load_cpu_field(cp15.c13_tls1);
            break;
        case 3:
            tmp = load_cpu_field(cp15.c13_tls2);
            break;
        case 4:
            tmp = load_cpu_field(cp15.c13_tls3);
            break;
        default:
            return 0;
        }
        store_reg(s, rd, tmp);

    } else {
        tmp = load_reg(s, rd);
        switch (op) {
        case 2:
            store_cpu_field(tmp, cp15.c13_tls1);
            break;
        case 3:
            store_cpu_field(tmp, cp15.c13_tls2);
            break;
        case 4:
            store_cpu_field(tmp, cp15.c13_tls3);
            break;
        default:
            dead_tmp(tmp);
            return 0;
        }
    }
    return 1;
}

/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt.  */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        return 0;
    }
    rd = (insn >> 12) & 0xf;

    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        tmp = new_tmp();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then the condition codes
           are set.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        dead_tmp(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}

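/* VFP_REG_SHR shifts right for n > 0 and left for n < 0, so the macros
   below can splice the extra D/N/M bit onto either end of the 4-bit
   register field (it forms the low bit of a single-precision register
   number and the high bit of a VFP3 double-precision one).  */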
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)

/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = new_tmp();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    dead_tmp(tmp);
}

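/* VFP is enabled iff the EN bit (bit 30) is set in FPEXC.  */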
static inline int
vfp_enabled(CPUState * env)
{
    return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
}

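/* The gen_neon_dup_* helpers replicate a single 8- or 16-bit element
   across all lanes of a 32-bit value (e.g. 0x000000ab -> 0xabababab).  */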
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!vfp_enabled(env)) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = new_tmp();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xff);
                            dead_tmp(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
                            dead_tmp(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = new_tmp();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        dead_tmp(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            dead_tmp(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
                    /* Integer or single precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
                    /* VCVT from int is always from S reg regardless of dp bit.
                     * VCVT with immediate frac_bits has same format as SREG_M
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

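            /* Short-vector setup: FPSCR LEN/STRIDE (cached in
               env->vfp.vec_len/vec_stride) turn most data-processing
               ops into implicit loops over register banks.  A
               destination in bank 0 keeps the operation scalar, and a
               source in bank 0 makes it mixed scalar/vector.  */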
            veclen = env->vfp.vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (env->vfp.vec_stride >> 1) + 1;
                    else
                        delta_d = env->vfp.vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* mac: fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* nmac: fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* msc: -fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 3: /* nmsc: -fd - (fn * fm)  */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
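                /* fconst (VFP3 VMOV immediate): expand the 8-bit
                   immediate to sign, exponent (MSB inverted and
                   replicated) and 4-bit fraction, as per VFPExpandImm;
                   for doubles only the top 32 bits are nonzero.  */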
                case 14: /* fconst */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                      return 1;

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
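                    /* Cases 4-7: half-precision conversions.  VCVTB
                       operates on the bottom 16 bits of the S register
                       and VCVTT on the top 16 bits; the f32->f16 forms
                       merge the result with the untouched half.  */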
                    case 4: /* vcvtb.f32.f16 */
3057
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3058
                          return 1;
3059
                        tmp = gen_vfp_mrs();
3060
                        tcg_gen_ext16u_i32(tmp, tmp);
3061
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3062
                        dead_tmp(tmp);
3063
                        break;
3064
                    case 5: /* vcvtt.f32.f16 */
3065
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3066
                          return 1;
3067
                        tmp = gen_vfp_mrs();
3068
                        tcg_gen_shri_i32(tmp, tmp, 16);
3069
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3070
                        dead_tmp(tmp);
3071
                        break;
3072
                    case 6: /* vcvtb.f16.f32 */
3073
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3074
                          return 1;
3075
                        tmp = new_tmp();
3076
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3077
                        gen_mov_F0_vreg(0, rd);
3078
                        tmp2 = gen_vfp_mrs();
3079
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3080
                        tcg_gen_or_i32(tmp, tmp, tmp2);
3081
                        dead_tmp(tmp2);
3082
                        gen_vfp_msr(tmp);
3083
                        break;
3084
                    case 7: /* vcvtt.f16.f32 */
3085
                        if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3086
                          return 1;
3087
                        tmp = new_tmp();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        dead_tmp(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp);
                        break;
                    case 20: /* fshto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                          return 1;
                        gen_vfp_shto(dp, 16 - rm);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                          return 1;
                        gen_vfp_slto(dp, 32 - rm);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                          return 1;
                        gen_vfp_uhto(dp, 16 - rm);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                          return 1;
                        gen_vfp_ulto(dp, 32 - rm);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                          return 1;
                        gen_vfp_tosh(dp, 16 - rm);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                          return 1;
                        gen_vfp_tosl(dp, 32 - rm);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                          return 1;
                        gen_vfp_touh(dp, 16 - rm);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                          return 1;
                        gen_vfp_toul(dp, 32 - rm);
                        break;
                    default: /* undefined */
                        printf ("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    printf ("op:%d\n", op);
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
                    /* VCVT double to int: always integer result. */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if (s->thumb && rn == 15) {
                addr = new_tmp();
                tcg_gen_movi_i32(addr, s->pc & ~2);
            } else {
                addr = load_reg(s, rn);
            }
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                dead_tmp(addr);
            } else {
                /* load/store multiple */
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (insn & (1 << 21)) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    dead_tmp(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
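
/* Generate a jump to "dest".  Use a direct goto_tb (so the TBs can be
   chained) only when the destination lies in the same guest page as the
   current TB; otherwise set the PC and return to the main loop.  */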
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
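
/* 16 x 16 -> 32 signed multiply, picking the top or bottom half of each
   operand according to x and y (the SMUL<x><y> halfword selectors).  */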
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}

/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}

/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    dead_tmp(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = new_tmp();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    dead_tmp(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
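
/* Write the current Thumb-2 conditional execution (IT block) state back
   into the CPU state.  Only does anything while inside an IT block.  */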
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
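
/* Shorthand for the common 64-bit operand triple (dest, src1, src2) =
   (cpu_V0, cpu_V0, cpu_V1), i.e. cpu_V0 = op(cpu_V0, cpu_V1).  */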
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: return 1;
    }
    return 0;
}

static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return;
    }
}

/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* FIXME: This is wrong.  They set the wrong overflow bit.  */
#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
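
/* Dispatch an integer Neon helper on element size and signedness: the
   switch index is (size << 1) | u, so cases 0..5 select the s8/u8,
   s16/u16 and s32/u32 variants.  The _ENV form additionally passes
   cpu_env for helpers that need the CPU state (e.g. saturating ops).  */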
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    dead_tmp(var);
}

static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg >> 1, reg & 1);
    } else {
        tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
        if (reg & 1) {
            gen_neon_dup_low16(tmp);
        } else {
            gen_neon_dup_high16(tmp);
        }
    }
    return tmp;
}
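
/* De-interleave the bytes of the t0:t1 pair: the even-numbered bytes
   end up in t0 and the odd-numbered bytes in t1.  */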
static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
{
    TCGv rd, rm, tmp;

    rd = new_tmp();
    rm = new_tmp();
    tmp = new_tmp();

    tcg_gen_andi_i32(rd, t0, 0xff);
    tcg_gen_shri_i32(tmp, t0, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff00);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shli_i32(tmp, t1, 16);
    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shli_i32(tmp, t1, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff000000);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(rm, t0, 8);
    tcg_gen_andi_i32(rm, rm, 0xff);
    tcg_gen_shri_i32(tmp, t0, 16);
    tcg_gen_andi_i32(tmp, tmp, 0xff00);
    tcg_gen_or_i32(rm, rm, tmp);
    tcg_gen_shli_i32(tmp, t1, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
    tcg_gen_or_i32(rm, rm, tmp);
    tcg_gen_andi_i32(tmp, t1, 0xff000000);
    tcg_gen_or_i32(t1, rm, tmp);
    tcg_gen_mov_i32(t0, rd);

    dead_tmp(tmp);
    dead_tmp(rm);
    dead_tmp(rd);
}

static void gen_neon_zip_u8(TCGv t0, TCGv t1)
{
    TCGv rd, rm, tmp;

    rd = new_tmp();
    rm = new_tmp();
    tmp = new_tmp();

    tcg_gen_andi_i32(rd, t0, 0xff);
    tcg_gen_shli_i32(tmp, t1, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff00);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shli_i32(tmp, t0, 16);
    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shli_i32(tmp, t1, 24);
    tcg_gen_andi_i32(tmp, tmp, 0xff000000);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_andi_i32(rm, t1, 0xff000000);
    tcg_gen_shri_i32(tmp, t0, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
    tcg_gen_or_i32(rm, rm, tmp);
    tcg_gen_shri_i32(tmp, t1, 8);
    tcg_gen_andi_i32(tmp, tmp, 0xff00);
    tcg_gen_or_i32(rm, rm, tmp);
    tcg_gen_shri_i32(tmp, t0, 16);
    tcg_gen_andi_i32(tmp, tmp, 0xff);
    tcg_gen_or_i32(t1, rm, tmp);
    tcg_gen_mov_i32(t0, rd);

    dead_tmp(tmp);
    dead_tmp(rm);
    dead_tmp(rd);
}

static void gen_neon_zip_u16(TCGv t0, TCGv t1)
{
    TCGv tmp, tmp2;

    tmp = new_tmp();
    tmp2 = new_tmp();

    tcg_gen_andi_i32(tmp, t0, 0xffff);
    tcg_gen_shli_i32(tmp2, t1, 16);
    tcg_gen_or_i32(tmp, tmp, tmp2);
    tcg_gen_andi_i32(t1, t1, 0xffff0000);
    tcg_gen_shri_i32(tmp2, t0, 16);
    tcg_gen_or_i32(t1, t1, tmp2);
    tcg_gen_mov_i32(t0, tmp);

    dead_tmp(tmp2);
    dead_tmp(tmp);
}
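
/* Unzip the elements of a register into the VFP scratch area, starting
   at scratch slot "tmp".  size selects the element width.  */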
static void gen_neon_unzip(int reg, int q, int tmp, int size)
{
    int n;
    TCGv t0, t1;

    for (n = 0; n < q + 1; n += 2) {
        t0 = neon_load_reg(reg, n);
        t1 = neon_load_reg(reg, n + 1);
        switch (size) {
        case 0: gen_neon_unzip_u8(t0, t1); break;
        case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same.  */
        case 2: /* no-op */; break;
        default: abort();
        }
        neon_store_scratch(tmp + n, t0);
        neon_store_scratch(tmp + n + 1, t1);
    }
}

static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = new_tmp();
    tmp = new_tmp();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    dead_tmp(tmp);
    dead_tmp(rd);
}

static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = new_tmp();
    tmp = new_tmp();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    dead_tmp(tmp);
    dead_tmp(rd);
}
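
/* Register list layout for the VLDn/VSTn "multiple structures" forms,
   indexed by the instruction type field: number of D registers accessed,
   the interleave factor and the spacing between the registers.  */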
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};

/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

    if (!vfp_enabled(env))
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    addr = new_tmp();
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            dead_tmp(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = new_tmp();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    dead_tmp(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = new_tmp();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            dead_tmp(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            if (!load)
                return 1;
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;
            stride = (insn & (1 << 5)) ? 2 : 1;
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                switch (size) {
                case 0:
                    tmp = gen_ld8u(addr, IS_USER(s));
                    gen_neon_dup_u8(tmp, 0);
                    break;
                case 1:
                    tmp = gen_ld16u(addr, IS_USER(s));
                    gen_neon_dup_low16(tmp);
                    break;
                case 2:
                    tmp = gen_ld32(addr, IS_USER(s));
                    break;
                case 3:
                    return 1;
                default: /* Avoid compiler warnings.  */
                    abort();
                }
                tcg_gen_addi_i32(addr, addr, 1 << size);
                tmp2 = new_tmp();
                tcg_gen_mov_i32(tmp2, tmp);
                neon_store_reg(rd, 0, tmp2);
                neon_store_reg(rd, 1, tmp);
                rd += stride;
            }
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        dead_tmp(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            stride = nregs * (1 << size);
        }
    }
    dead_tmp(addr);
    if (rm != 15) {
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            dead_tmp(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}

/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
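
/* Narrow a 64-bit value to 32 bits of packed elements, either with plain
   truncation or with signed/unsigned saturation.  */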
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
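
/* Widen 32 bits of packed elements to a 64-bit value, sign- or
   zero-extending according to "u".  Frees the source temporary.  */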
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    dead_tmp(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
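
/* Widening multiply: 32 bits of packed elements from a and b yield a
   64-bit result.  The (size << 1) | u index selects the signed/unsigned
   8- and 16-bit helpers; 32-bit elements use a plain 64-bit multiply.  */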
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    default: abort();
    }
}

/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    int n;
    uint32_t imm, mask;
    TCGv tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_i64 tmp64;

    if (!vfp_enabled(env))
      return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
                          || op == 10 || op == 11 || op == 16)) {
            /* 64-bit element instructions.  */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case 1: /* VQADD */
                    if (u) {
                        gen_helper_neon_add_saturate_u64(CPU_V001);
                    } else {
                        gen_helper_neon_add_saturate_s64(CPU_V001);
                    }
                    break;
                case 5: /* VQSUB */
                    if (u) {
                        gen_helper_neon_sub_saturate_u64(CPU_V001);
                    } else {
                        gen_helper_neon_sub_saturate_s64(CPU_V001);
                    }
                    break;
                case 8: /* VSHL */
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case 9: /* VQSHL */
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case 10: /* VRSHL */
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case 11: /* VQRSHL */
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                case 16:
                    if (u) {
                        tcg_gen_sub_i64(CPU_V001);
                    } else {
                        tcg_gen_add_i64(CPU_V001);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        switch (op) {
        case 8: /* VSHL */
        case 9: /* VQSHL */
        case 10: /* VRSHL */
        case 11: /* VQRSHL */
            {
                int rtmp;
                /* Shift instruction operands are reversed.  */
                rtmp = rn;
                rn = rm;
                rm = rtmp;
                pairwise = 0;
            }
            break;
        case 20: /* VPMAX */
        case 21: /* VPMIN */
        case 23: /* VPADD */
            pairwise = 1;
            break;
        case 26: /* VPADD (float) */
            pairwise = (u && size < 2);
            break;
        case 30: /* VPMIN/VPMAX (float) */
            pairwise = u;
            break;
        default:
            pairwise = 0;
            break;
        }
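
        /* The operation is performed in 32-bit passes: two passes for a
           D register, four for a Q register.  */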
        for (pass = 0; pass < (q ? 4 : 2); pass++) {

        if (pairwise) {
            /* Pairwise.  */
            if (q)
                n = (pass & 1) * 2;
            else
                n = 0;
            if (pass < q + 1) {
                tmp = neon_load_reg(rn, n);
                tmp2 = neon_load_reg(rn, n + 1);
            } else {
                tmp = neon_load_reg(rm, n);
                tmp2 = neon_load_reg(rm, n + 1);
            }
        } else {
            /* Elementwise.  */
            tmp = neon_load_reg(rn, pass);
            tmp2 = neon_load_reg(rm, pass);
        }
        switch (op) {
        case 0: /* VHADD */
            GEN_NEON_INTEGER_OP(hadd);
            break;
        case 1: /* VQADD */
            GEN_NEON_INTEGER_OP_ENV(qadd);
            break;
        case 2: /* VRHADD */
            GEN_NEON_INTEGER_OP(rhadd);
            break;
        case 3: /* Logic ops.  */
            switch ((u << 2) | size) {
            case 0: /* VAND */
                tcg_gen_and_i32(tmp, tmp, tmp2);
                break;
            case 1: /* BIC */
                tcg_gen_andc_i32(tmp, tmp, tmp2);
                break;
            case 2: /* VORR */
                tcg_gen_or_i32(tmp, tmp, tmp2);
                break;
            case 3: /* VORN */
                tcg_gen_orc_i32(tmp, tmp, tmp2);
                break;
            case 4: /* VEOR */
                tcg_gen_xor_i32(tmp, tmp, tmp2);
                break;
            case 5: /* VBSL */
                tmp3 = neon_load_reg(rd, pass);
                gen_neon_bsl(tmp, tmp, tmp2, tmp3);
                dead_tmp(tmp3);
                break;
            case 6: /* VBIT */
                tmp3 = neon_load_reg(rd, pass);
                gen_neon_bsl(tmp, tmp, tmp3, tmp2);
                dead_tmp(tmp3);
                break;
            case 7: /* VBIF */
                tmp3 = neon_load_reg(rd, pass);
                gen_neon_bsl(tmp, tmp3, tmp, tmp2);
                dead_tmp(tmp3);
                break;
            }
            break;
        case 4: /* VHSUB */
            GEN_NEON_INTEGER_OP(hsub);
            break;
        case 5: /* VQSUB */
            GEN_NEON_INTEGER_OP_ENV(qsub);
            break;
        case 6: /* VCGT */
            GEN_NEON_INTEGER_OP(cgt);
            break;
        case 7: /* VCGE */
            GEN_NEON_INTEGER_OP(cge);
            break;
        case 8: /* VSHL */
            GEN_NEON_INTEGER_OP(shl);
            break;
        case 9: /* VQSHL */
            GEN_NEON_INTEGER_OP_ENV(qshl);
            break;
        case 10: /* VRSHL */
            GEN_NEON_INTEGER_OP(rshl);
            break;
        case 11: /* VQRSHL */
            GEN_NEON_INTEGER_OP_ENV(qrshl);
            break;
        case 12: /* VMAX */
            GEN_NEON_INTEGER_OP(max);
            break;
        case 13: /* VMIN */
            GEN_NEON_INTEGER_OP(min);
            break;
        case 14: /* VABD */
            GEN_NEON_INTEGER_OP(abd);
            break;
        case 15: /* VABA */
            GEN_NEON_INTEGER_OP(abd);
            dead_tmp(tmp2);
            tmp2 = neon_load_reg(rd, pass);
            gen_neon_add(size, tmp, tmp2);
            break;
        case 16:
            if (!u) { /* VADD */
                if (gen_neon_add(size, tmp, tmp2))
                    return 1;
            } else { /* VSUB */
                switch (size) {
                case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
                default: return 1;
                }
            }
            break;
        case 17:
            if (!u) { /* VTST */
                switch (size) {
                case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
                case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
                default: return 1;
                }
            } else { /* VCEQ */
                switch (size) {
                case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                default: return 1;
                }
            }
            break;
        case 18: /* Multiply.  */
            switch (size) {
            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
            default: return 1;
            }
            dead_tmp(tmp2);
            tmp2 = neon_load_reg(rd, pass);
            if (u) { /* VMLS */
                gen_neon_rsb(size, tmp, tmp2);
            } else { /* VMLA */
                gen_neon_add(size, tmp, tmp2);
            }
            break;
        case 19: /* VMUL */
            if (u) { /* polynomial */
                gen_helper_neon_mul_p8(tmp, tmp, tmp2);
            } else { /* Integer */
                switch (size) {
                case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                default: return 1;
                }
            }
            break;
        case 20: /* VPMAX */
            GEN_NEON_INTEGER_OP(pmax);
            break;
        case 21: /* VPMIN */
            GEN_NEON_INTEGER_OP(pmin);
            break;
        case 22: /* Multiply high.  */
            if (!u) { /* VQDMULH */
                switch (size) {
                case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
                case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
                default: return 1;
                }
            } else { /* VQRDMULH */
                switch (size) {
                case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
                case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
                default: return 1;
                }
            }
            break;
        case 23: /* VPADD */
            if (u)
                return 1;
            switch (size) {
            case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
            case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
            case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
            default: return 1;
            }
            break;
        case 26: /* Floating point arithmetic.  */
            switch ((u << 2) | size) {
            case 0: /* VADD */
                gen_helper_neon_add_f32(tmp, tmp, tmp2);
                break;
            case 2: /* VSUB */
                gen_helper_neon_sub_f32(tmp, tmp, tmp2);
                break;
            case 4: /* VPADD */
                gen_helper_neon_add_f32(tmp, tmp, tmp2);
                break;
            case 6: /* VABD */
                gen_helper_neon_abd_f32(tmp, tmp, tmp2);
                break;
            default:
                return 1;
            }
            break;
        case 27: /* Float multiply.  */
            gen_helper_neon_mul_f32(tmp, tmp, tmp2);
            if (!u) {
                dead_tmp(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                if (size == 0) {
                    gen_helper_neon_add_f32(tmp, tmp, tmp2);
                } else {
                    gen_helper_neon_sub_f32(tmp, tmp2, tmp);
                }
            }
            break;
        case 28: /* Float compare.  */
            if (!u) {
                gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
            } else {
                if (size == 0)
                    gen_helper_neon_cge_f32(tmp, tmp, tmp2);
                else
                    gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
            }
            break;
        case 29: /* Float compare absolute.  */
            if (!u)
                return 1;
            if (size == 0)
                gen_helper_neon_acge_f32(tmp, tmp, tmp2);
            else
                gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
            break;
        case 30: /* Float min/max.  */
            if (size == 0)
                gen_helper_neon_max_f32(tmp, tmp, tmp2);
            else
                gen_helper_neon_min_f32(tmp, tmp, tmp2);
            break;
        case 31:
            if (size == 0)
                gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
            else
                gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
            break;
        default:
            abort();
        }
        dead_tmp(tmp2);

        /* Save the result.  For elementwise operations we can put it
           straight into the destination register.  For pairwise operations
           we have to be careful to avoid clobbering the source operands.  */
        if (pairwise && rd == rm) {
            neon_store_scratch(pass, tmp);
        } else {
            neon_store_reg(rd, pass, tmp);
        }

        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations.  */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift.  */
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            /* To avoid excessive duplication of ops we implement shift
               by immediate using the variable shift operations.  */
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits.  */
                if (op <= 4)
                    shift = shift - (1 << (size + 3));
                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }
                switch (size) {
                case 0:
                    imm = (uint8_t) shift;
                    imm |= imm << 8;
                    imm |= imm << 16;
                    break;
                case 1:
                    imm = (uint16_t) shift;
                    imm |= imm << 16;
                    break;
                case 2:
                case 3:
                    imm = shift;
                    break;
                default:
                    abort();
                }

                for (pass = 0; pass < count; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tcg_gen_movi_i64(cpu_V1, imm);
                        switch (op) {
                        case 0:  /* VSHR */
                        case 1:  /* VSRA */
                            if (u)
                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 4: /* VSRI */
                            if (!u)
                                return 1;
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 5: /* VSHL, VSLI */
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 6: /* VQSHL */
                            if (u)
                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
                            break;
                        case 7: /* VQSHLU */
                            gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
                            break;
                        }
                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V0, rd + pass);
4667
                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4668
                        } else if (op == 4 || (op == 5 && u)) {
4669
                            /* Insert */
4670
                            cpu_abort(env, "VS[LR]I.64 not implemented");
4671
                        }
4672
                        neon_store_reg64(cpu_V0, rd + pass);
4673
                    } else { /* size < 3 */
                        /* Operands in tmp and tmp2.  */
                        tmp = neon_load_reg(rm, pass);
                        tmp2 = new_tmp();
                        tcg_gen_movi_i32(tmp2, imm);
                        switch (op) {
                        case 0:  /* VSHR */
                        case 1:  /* VSRA */
                            GEN_NEON_INTEGER_OP(shl);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            GEN_NEON_INTEGER_OP(rshl);
                            break;
                        case 4: /* VSRI */
                            if (!u)
                                return 1;
                            GEN_NEON_INTEGER_OP(shl);
                            break;
                        case 5: /* VSHL, VSLI */
                            switch (size) {
                            case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
                            default: return 1;
                            }
                            break;
                        case 6: /* VQSHL */
                            GEN_NEON_INTEGER_OP_ENV(qshl);
                            break;
                        case 7: /* VQSHLU */
                            switch (size) {
                            case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
                            case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
                            case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
                            default: return 1;
                            }
                            break;
                        }
                        dead_tmp(tmp2);

                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            gen_neon_add(size, tmp2, tmp);
                            dead_tmp(tmp2);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
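                            /* mask has 1s in the bits taken from the
                               shifted source element; the remaining bits
                               of the destination are left untouched.  */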
                            switch (size) {
                            case 0:
                                if (op == 4)
                                    mask = 0xff >> -shift;
                                else
                                    mask = (uint8_t)(0xff << shift);
                                mask |= mask << 8;
                                mask |= mask << 16;
                                break;
                            case 1:
                                if (op == 4)
                                    mask = 0xffff >> -shift;
                                else
                                    mask = (uint16_t)(0xffff << shift);
                                mask |= mask << 16;
                                break;
                            case 2:
                                if (shift < -31 || shift > 31) {
                                    mask = 0;
                                } else {
                                    if (op == 4)
                                        mask = 0xffffffffu >> -shift;
                                    else
                                        mask = 0xffffffffu << shift;
                                }
                                break;
                            default:
                                abort();
                            }
                            tmp2 = neon_load_reg(rd, pass);
                            tcg_gen_andi_i32(tmp, tmp, mask);
                            tcg_gen_andi_i32(tmp2, tmp2, ~mask);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            dead_tmp(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                } /* for pass */
            } else if (op < 10) {
                /* Shift by immediate and narrow:
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
                shift = shift - (1 << (size + 3));
                size++;
                switch (size) {
                case 1:
                    imm = (uint16_t)shift;
                    imm |= imm << 16;
                    tmp2 = tcg_const_i32(imm);
                    TCGV_UNUSED_I64(tmp64);
                    break;
                case 2:
                    imm = (uint32_t)shift;
                    tmp2 = tcg_const_i32(imm);
                    TCGV_UNUSED_I64(tmp64);
                    break;
                case 3:
                    tmp64 = tcg_const_i64(shift);
                    TCGV_UNUSED(tmp2);
                    break;
                default:
                    abort();
                }
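
                /* size was incremented above, so the shifts below operate
                   on the double-width source elements; each pass then
                   narrows one 64-bit result back to 32 bits.  */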
                for (pass = 0; pass < 2; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        if (q) {
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
                        } else {
                            if (u)
                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
                            else
                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
                        }
                    } else {
                        tmp = neon_load_reg(rm + pass, 0);
                        gen_neon_shift_narrow(size, tmp, tmp2, q, u);
                        tmp3 = neon_load_reg(rm + pass, 1);
                        gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                        dead_tmp(tmp);
                        dead_tmp(tmp3);
                    }
                    tmp = new_tmp();
                    if (op == 8 && !u) {
                        gen_neon_narrow(size - 1, tmp, cpu_V0);
                    } else {
                        if (op == 8)
                            gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
                        else
                            gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
                    }
                    neon_store_reg(rd, pass, tmp);
                } /* for pass */
                if (size == 3) {
                    tcg_temp_free_i64(tmp64);
                } else {
                    dead_tmp(tmp2);
                }
            } else if (op == 10) {
                /* VSHLL */
                if (q || size == 3)
                    return 1;
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1)
                        tmp = tmp2;

                    gen_neon_widen(cpu_V0, tmp, size, u);

                    if (shift != 0) {
                        /* The shift is less than the width of the source
                           type, so we can just shift the whole register.  */
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
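                        /* Bits shifted into the bottom of each widened
                           lane come from the lane below and must be
                           cleared.  */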
                        if (size < 2 || !u) {
                            uint64_t imm64;
                            if (size == 0) {
                                imm = (0xffu >> (8 - shift));
                                imm |= imm << 16;
                            } else {
                                imm = 0xffff >> (16 - shift);
                            }
                            imm64 = imm | (((uint64_t)imm) << 32);
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                        }
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
            } else if (op >= 14) {
                /* VCVT fixed-point.  */
                /* We have already masked out the must-be-1 top bit of imm6,
                 * hence this 32-shift where the ARM ARM has 64-imm6.
                 */
                shift = 32 - shift;
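                /* Convert each 32-bit element in place using the VFP
                   single-precision fixed-point helpers.  */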
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                    if (!(op & 1)) {
                        if (u)
                            gen_vfp_ulto(0, shift);
                        else
                            gen_vfp_slto(0, shift);
                    } else {
                        if (u)
                            gen_vfp_toul(0, shift);
                        else
                            gen_vfp_tosl(0, shift);
                    }
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
                }
            } else {
                return 1;
            }
        } else { /* (insn & 0x00380080) == 0 */
            int invert;

            op = (insn >> 8) & 0xf;
            /* One register and immediate.  */
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
            invert = (insn & (1 << 5)) != 0;
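            /* The op field (cmode) selects how the 8-bit immediate is
               expanded to 32 bits; cases 14 and 15 are the per-byte mask
               and floating-point encodings.  */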
            switch (op) {
            case 0: case 1:
                /* no-op */
                break;
            case 2: case 3:
                imm <<= 8;
                break;
            case 4: case 5:
                imm <<= 16;
                break;
            case 6: case 7:
                imm <<= 24;
                break;
            case 8: case 9:
                imm |= imm << 16;
                break;
            case 10: case 11:
                imm = (imm << 8) | (imm << 24);
                break;
            case 12:
                imm = (imm << 8) | 0xff;
                break;
            case 13:
                imm = (imm << 16) | 0xffff;
                break;
            case 14:
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
                if (invert)
                    imm = ~imm;
                break;
            case 15:
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                break;
            }
            if (invert)
                imm = ~imm;

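            /* Odd op values below 12 modify the destination (VORR/VBIC);
               the rest are VMOV/VMVN with the expanded immediate.  */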
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                if (op & 1 && op < 12) {
                    tmp = neon_load_reg(rd, pass);
                    if (invert) {
                        /* The immediate value has already been inverted, so
                           BIC becomes AND.  */
                        tcg_gen_andi_i32(tmp, tmp, imm);
                    } else {
                        tcg_gen_ori_i32(tmp, tmp, imm);
                    }
                } else {
                    /* VMOV, VMVN.  */
                    tmp = new_tmp();
                    if (op == 14 && invert) {
                        uint32_t val;
                        val = 0;
                        for (n = 0; n < 4; n++) {
                            if (imm & (1 << (n + (pass & 1) * 4)))
                                val |= 0xff << (n * 8);
                        }
                        tcg_gen_movi_i32(tmp, val);
                    } else {
                        tcg_gen_movi_i32(tmp, imm);
                    }
                }
                neon_store_reg(rd, pass, tmp);
            }
        }
    } else { /* (insn & 0x00800010) == 0x00800000 */
        if (size != 3) {
            op = (insn >> 8) & 0xf;
            if ((insn & (1 << 6)) == 0) {
                /* Three registers of different lengths.  */
                int src1_wide;
                int src2_wide;
                int prewiden;
                /* prewiden, src1_wide, src2_wide */
                static const int neon_3reg_wide[16][3] = {
                    {1, 0, 0}, /* VADDL */
                    {1, 1, 0}, /* VADDW */
                    {1, 0, 0}, /* VSUBL */
                    {1, 1, 0}, /* VSUBW */
                    {0, 1, 1}, /* VADDHN */
                    {0, 0, 0}, /* VABAL */
                    {0, 1, 1}, /* VSUBHN */
                    {0, 0, 0}, /* VABDL */
                    {0, 0, 0}, /* VMLAL */
                    {0, 0, 0}, /* VQDMLAL */
                    {0, 0, 0}, /* VMLSL */
                    {0, 0, 0}, /* VQDMLSL */
                    {0, 0, 0}, /* Integer VMULL */
                    {0, 0, 0}, /* VQDMULL */
                    {0, 0, 0}  /* Polynomial VMULL */
                };
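
                /* prewiden: narrow source operands are widened before the
                   operation; srcN_wide: that source is already a 64-bit
                   wide operand and is loaded as such.  */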
                prewiden = neon_3reg_wide[op][0];
                src1_wide = neon_3reg_wide[op][1];
                src2_wide = neon_3reg_wide[op][2];

                if (size == 0 && (op == 9 || op == 11 || op == 13))
                    return 1;

                /* Avoid overlapping operands.  Wide source operands are
                   always aligned so will never overlap with wide
                   destinations in problematic ways.  */
                if (rd == rm && !src2_wide) {
                    tmp = neon_load_reg(rm, 1);
                    neon_store_scratch(2, tmp);
                } else if (rd == rn && !src1_wide) {
                    tmp = neon_load_reg(rn, 1);
                    neon_store_scratch(2, tmp);
                }
                TCGV_UNUSED(tmp3);
                for (pass = 0; pass < 2; pass++) {
                    if (src1_wide) {
                        neon_load_reg64(cpu_V0, rn + pass);
                        TCGV_UNUSED(tmp);
                    } else {
                        if (pass == 1 && rd == rn) {
                            tmp = neon_load_scratch(2);
                        } else {
                            tmp = neon_load_reg(rn, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V0, tmp, size, u);
                        }
                    }
                    if (src2_wide) {
                        neon_load_reg64(cpu_V1, rm + pass);
                        TCGV_UNUSED(tmp2);
                    } else {
                        if (pass == 1 && rd == rm) {
                            tmp2 = neon_load_scratch(2);
                        } else {
                            tmp2 = neon_load_reg(rm, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V1, tmp2, size, u);
                        }
                    }
                    switch (op) {
                    case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
                        gen_neon_addl(size);
                        break;
                    case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
                        gen_neon_subl(size);
                        break;
                    case 5: case 7: /* VABAL, VABDL */
                        switch ((size << 1) | u) {
                        case 0:
                            gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
                            break;
                        case 1:
                            gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
                            break;
                        case 2:
                            gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
                            break;
                        case 3:
                            gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
                            break;
                        case 4:
                            gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
                            break;
                        case 5:
                            gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
                            break;
                        default: abort();
                        }
                        dead_tmp(tmp2);
                        dead_tmp(tmp);
                        break;
                    case 8: case 9: case 10: case 11: case 12: case 13:
                        /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        dead_tmp(tmp2);
                        dead_tmp(tmp);
                        break;
                    case 14: /* Polynomial VMULL */
                        cpu_abort(env, "Polynomial VMULL not implemented");

                    default: /* 15 is RESERVED.  */
                        return 1;
                    }
                    if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
                        /* Accumulate.  */
                        if (op == 10 || op == 11) {
                            gen_neon_negl(cpu_V0, size);
                        }

                        if (op != 13) {
                            neon_load_reg64(cpu_V1, rd + pass);
                        }

                        switch (op) {
                        case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
                            gen_neon_addl(size);
                            break;
                        case 9: case 11: /* VQDMLAL, VQDMLSL */
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        case 13: /* VQDMULL */
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 4 || op == 6) {
                        /* Narrowing operation.  */
                        tmp = new_tmp();
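                        /* Take the high half of each double-width result
                           element; the u forms add a rounding constant
                           first.  */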
                        if (!u) {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        } else {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        }
                        if (pass == 0) {
                            tmp3 = tmp;
                        } else {
                            neon_store_reg(rd, 0, tmp3);
                            neon_store_reg(rd, 1, tmp);
                        }
                    } else {
                        /* Write back the result.  */
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                }
            } else {
                /* Two registers and a scalar.  */
                switch (op) {
                case 0: /* Integer VMLA scalar */
                case 1: /* Float VMLA scalar */
                case 4: /* Integer VMLS scalar */
                case 5: /* Floating point VMLS scalar */
                case 8: /* Integer VMUL scalar */
                case 9: /* Floating point VMUL scalar */
                case 12: /* VQDMULH scalar */
                case 13: /* VQRDMULH scalar */
                    tmp = neon_get_scalar(size, rm);
                    neon_store_scratch(0, tmp);
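                    /* The scalar is kept in a scratch slot and reloaded
                       each pass because the multiply below overwrites its
                       first operand with the result.  */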
                    for (pass = 0; pass < (u ? 4 : 2); pass++) {
                        tmp = neon_load_scratch(0);
                        tmp2 = neon_load_reg(rn, pass);
                        if (op == 12) {
                            if (size == 1) {
                                gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                            }
                        } else if (op == 13) {
                            if (size == 1) {
                                gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                            }
                        } else if (op & 1) {
                            gen_helper_neon_mul_f32(tmp, tmp, tmp2);
                        } else {
                            switch (size) {
                            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                            default: return 1;
                            }
                        }
                        dead_tmp(tmp2);
                        if (op < 8) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            switch (op) {
                            case 0:
                                gen_neon_add(size, tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_add_f32(tmp, tmp, tmp2);
                                break;
                            case 4:
                                gen_neon_rsb(size, tmp, tmp2);
                                break;
                            case 5:
                                gen_helper_neon_sub_f32(tmp, tmp2, tmp);
                                break;
                            default:
                                abort();
                            }
                            dead_tmp(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                    break;
                case 2: /* VMLAL scalar */
                case 3: /* VQDMLAL scalar */
                case 6: /* VMLSL scalar */
                case 7: /* VQDMLSL scalar */
                case 10: /* VMULL scalar */
                case 11: /* VQDMULL scalar */
                    if (size == 0 && (op == 3 || op == 7 || op == 11))
                        return 1;

                    tmp2 = neon_get_scalar(size, rm);
                    tmp3 = neon_load_reg(rn, 1);
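
                    /* The second half of rn is loaded up front because rd
                       may overlap rn and the first pass already writes
                       rd.  */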
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rn, 0);
                        } else {
                            tmp = tmp3;
                        }
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        dead_tmp(tmp);
                        if (op == 6 || op == 7) {
                            gen_neon_negl(cpu_V0, size);
                        }
                        if (op != 11) {
                            neon_load_reg64(cpu_V1, rd + pass);
                        }
                        switch (op) {
                        case 2: case 6:
                            gen_neon_addl(size);
                            break;
                        case 3: case 7:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        case 10:
                            /* no-op */
                            break;
                        case 11:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }

                    dead_tmp(tmp2);

                    break;
                default: /* 14 and 15 are RESERVED */
                    return 1;
                }
            }
        } else { /* size == 3 */
            if (!u) {
                /* Extract.  */
                imm = (insn >> 8) & 0xf;
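
                /* VEXT: extract a byte-aligned window from the register
                   pair Vm:Vn, starting at byte offset imm, using 64-bit
                   shift/OR pairs.  */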
                if (imm > 7 && !q)
                    return 1;

                if (imm == 0) {
                    neon_load_reg64(cpu_V0, rn);
                    if (q) {
                        neon_load_reg64(cpu_V1, rn + 1);
                    }
                } else if (imm == 8) {
                    neon_load_reg64(cpu_V0, rn + 1);
                    if (q) {
                        neon_load_reg64(cpu_V1, rm);
                    }
                } else if (q) {
                    tmp64 = tcg_temp_new_i64();
                    if (imm < 8) {
                        neon_load_reg64(cpu_V0, rn);
                        neon_load_reg64(tmp64, rn + 1);
                    } else {
                        neon_load_reg64(cpu_V0, rn + 1);
                        neon_load_reg64(tmp64, rm);
                    }
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
                    tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                    if (imm < 8) {
                        neon_load_reg64(cpu_V1, rm);
                    } else {
                        neon_load_reg64(cpu_V1, rm + 1);
                        imm -= 8;
                    }
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
                    tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
                    tcg_temp_free_i64(tmp64);
                } else {
                    neon_load_reg64(cpu_V0, rn);
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
                    neon_load_reg64(cpu_V1, rm);
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                }
                neon_store_reg64(cpu_V0, rd);
                if (q) {
                    neon_store_reg64(cpu_V1, rd + 1);
                }
            } else if ((insn & (1 << 11)) == 0) {
                /* Two register misc.  */
                op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
                size = (insn >> 18) & 3;
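                /* op bits [5:4] come from insn bits [17:16] and op bits
                   [3:0] from insn bits [10:7]; ops that cannot be done
                   32 bits at a time are handled as special cases before
                   the elementwise loop.  */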
                switch (op) {
                case 0: /* VREV64 */
                    if (size == 3)
                        return 1;
                    for (pass = 0; pass < (q ? 2 : 1); pass++) {
                        tmp = neon_load_reg(rm, pass * 2);
                        tmp2 = neon_load_reg(rm, pass * 2 + 1);
                        switch (size) {
                        case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                        case 1: gen_swap_half(tmp); break;
                        case 2: /* no-op */ break;
                        default: abort();
                        }
                        neon_store_reg(rd, pass * 2 + 1, tmp);
                        if (size == 2) {
                            neon_store_reg(rd, pass * 2, tmp2);
                        } else {
                            switch (size) {
                            case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
                            case 1: gen_swap_half(tmp2); break;
                            default: abort();
                            }
                            neon_store_reg(rd, pass * 2, tmp2);
                        }
                    }
                    break;
                case 4: case 5: /* VPADDL */
                case 12: case 13: /* VPADAL */
                    if (size == 3)
                        return 1;
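                    /* Widen both halves of each double-width chunk, then
                       add the widened values pairwise; VPADAL also
                       accumulates into rd.  */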
                    for (pass = 0; pass < q + 1; pass++) {
                        tmp = neon_load_reg(rm, pass * 2);
                        gen_neon_widen(cpu_V0, tmp, size, op & 1);
                        tmp = neon_load_reg(rm, pass * 2 + 1);
                        gen_neon_widen(cpu_V1, tmp, size, op & 1);
                        switch (size) {
                        case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
                        case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
                        case 2: tcg_gen_add_i64(CPU_V001); break;
                        default: abort();
                        }
                        if (op >= 12) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            gen_neon_addl(size);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                case 33: /* VTRN */
                    if (size == 2) {
                        for (n = 0; n < (q ? 4 : 2); n += 2) {
                            tmp = neon_load_reg(rm, n);
                            tmp2 = neon_load_reg(rd, n + 1);
                            neon_store_reg(rm, n, tmp2);
                            neon_store_reg(rd, n + 1, tmp);
                        }
                    } else {
                        goto elementwise;
                    }
                    break;
                case 34: /* VUZP */
                    /* Reg  Before       After
                       Rd   A3 A2 A1 A0  B2 B0 A2 A0
                       Rm   B3 B2 B1 B0  B3 B1 A3 A1
                     */
                    if (size == 3)
                        return 1;
                    gen_neon_unzip(rd, q, 0, size);
                    gen_neon_unzip(rm, q, 4, size);
                    if (q) {
                        static int unzip_order_q[8] =
                            {0, 2, 4, 6, 1, 3, 5, 7};
                        for (n = 0; n < 8; n++) {
                            int reg = (n < 4) ? rd : rm;
                            tmp = neon_load_scratch(unzip_order_q[n]);
                            neon_store_reg(reg, n % 4, tmp);
                        }
                    } else {
                        static int unzip_order[4] =
                            {0, 4, 1, 5};
                        for (n = 0; n < 4; n++) {
                            int reg = (n < 2) ? rd : rm;
                            tmp = neon_load_scratch(unzip_order[n]);
                            neon_store_reg(reg, n % 2, tmp);
                        }
                    }
                    break;
                case 35: /* VZIP */
                    /* Reg  Before       After
                       Rd   A3 A2 A1 A0  B1 A1 B0 A0
                       Rm   B3 B2 B1 B0  B3 A3 B2 A2
                     */
                    if (size == 3)
                        return 1;
                    count = (q ? 4 : 2);
                    for (n = 0; n < count; n++) {
                        tmp = neon_load_reg(rd, n);
                        tmp2 = neon_load_reg(rm, n);
                        switch (size) {
                        case 0: gen_neon_zip_u8(tmp, tmp2); break;
                        case 1: gen_neon_zip_u16(tmp, tmp2); break;
                        case 2: /* no-op */; break;
                        default: abort();
                        }
                        neon_store_scratch(n * 2, tmp);
                        neon_store_scratch(n * 2 + 1, tmp2);
                    }
                    for (n = 0; n < count * 2; n++) {
                        int reg = (n < count) ? rd : rm;
                        tmp = neon_load_scratch(n);
                        neon_store_reg(reg, n % count, tmp);
                    }
                    break;
                case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
                    if (size == 3)
                        return 1;
                    TCGV_UNUSED(tmp2);
                    for (pass = 0; pass < 2; pass++) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tmp = new_tmp();
                        if (op == 36 && q == 0) {
                            gen_neon_narrow(size, tmp, cpu_V0);
                        } else if (q) {
                            gen_neon_narrow_satu(size, tmp, cpu_V0);
                        } else {
                            gen_neon_narrow_sats(size, tmp, cpu_V0);
                        }
                        if (pass == 0) {
                            tmp2 = tmp;
                        } else {
                            neon_store_reg(rd, 0, tmp2);
                            neon_store_reg(rd, 1, tmp);
                        }
                    }
                    break;
                case 38: /* VSHLL */
                    if (q || size == 3)
                        return 1;
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 1)
                            tmp = tmp2;
                        gen_neon_widen(cpu_V0, tmp, size, 1);
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                case 44: /* VCVT.F16.F32 */
                    if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                        return 1;
                    tmp = new_tmp();
                    tmp2 = new_tmp();
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
                    gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
                    gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                    tcg_gen_shli_i32(tmp2, tmp2, 16);
                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
                    gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
                    neon_store_reg(rd, 0, tmp2);
                    tmp2 = new_tmp();
                    gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
                    tcg_gen_shli_i32(tmp2, tmp2, 16);
                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                    neon_store_reg(rd, 1, tmp2);
                    dead_tmp(tmp);
                    break;
                case 46: /* VCVT.F32.F16 */
                    if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
                        return 1;
                    tmp3 = new_tmp();
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                    tcg_gen_ext16u_i32(tmp3, tmp);
                    gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_shri_i32(tmp3, tmp, 16);
                    gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
                    dead_tmp(tmp);
                    tcg_gen_ext16u_i32(tmp3, tmp2);
                    gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
                    tcg_gen_shri_i32(tmp3, tmp2, 16);
                    gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
                    dead_tmp(tmp2);
                    dead_tmp(tmp3);
                    break;
                default:
                elementwise:
                    for (pass = 0; pass < (q ? 4 : 2); pass++) {
                        if (op == 30 || op == 31 || op >= 58) {
                            tcg_gen_ld_f32(cpu_F0s, cpu_env,
                                           neon_reg_offset(rm, pass));
                            TCGV_UNUSED(tmp);
                        } else {
                            tmp = neon_load_reg(rm, pass);
                        }
                        switch (op) {
                        case 1: /* VREV32 */
                            switch (size) {
                            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                            case 1: gen_swap_half(tmp); break;
                            default: return 1;
                            }
                            break;
                        case 2: /* VREV16 */
                            if (size != 0)
                                return 1;
                            gen_rev16(tmp);
                            break;
                        case 8: /* CLS */
                            switch (size) {
                            case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
                            case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
                            case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
                            default: return 1;
                            }
                            break;
                        case 9: /* CLZ */
                            switch (size) {
                            case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
                            case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
                            case 2: gen_helper_clz(tmp, tmp); break;
                            default: return 1;
                            }
                            break;
                        case 10: /* CNT */
                            if (size != 0)
                                return 1;
                            gen_helper_neon_cnt_u8(tmp, tmp);
                            break;
                        case 11: /* VNOT */
                            if (size != 0)
                                return 1;
                            tcg_gen_not_i32(tmp, tmp);
                            break;
                        case 14: /* VQABS */
                            switch (size) {
                            case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
                            case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
                            case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
                            default: return 1;
                            }
                            break;
                        case 15: /* VQNEG */
                            switch (size) {
                            case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
                            case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
                            case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
                            default: return 1;
                            }
                            break;
                        case 16: case 19: /* VCGT #0, VCLE #0 */
                            tmp2 = tcg_const_i32(0);
                            switch (size) {
                            case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
                            default: return 1;
                            }
                            tcg_temp_free(tmp2);
                            if (op == 19)
                                tcg_gen_not_i32(tmp, tmp);
                            break;
                        case 17: case 20: /* VCGE #0, VCLT #0 */
                            tmp2 = tcg_const_i32(0);
                            switch (size) {
                            case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
                            default: return 1;
                            }
                            tcg_temp_free(tmp2);
                            if (op == 20)
                                tcg_gen_not_i32(tmp, tmp);
                            break;
                        case 18: /* VCEQ #0 */
                            tmp2 = tcg_const_i32(0);
                            switch (size) {
                            case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                            case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                            default: return 1;
                            }
                            tcg_temp_free(tmp2);
                            break;
                        case 22: /* VABS */
                            switch (size) {
                            case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
                            case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
                            case 2: tcg_gen_abs_i32(tmp, tmp); break;
                            default: return 1;
                            }
                            break;
                        case 23: /* VNEG */
                            if (size == 3)
                                return 1;
                            tmp2 = tcg_const_i32(0);
                            gen_neon_rsb(size, tmp, tmp2);
                            tcg_temp_free(tmp2);
                            break;
                        case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
                            tcg_temp_free(tmp2);
                            if (op == 27)
                                tcg_gen_not_i32(tmp, tmp);
                            break;
                        case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_cge_f32(tmp, tmp, tmp2);
                            tcg_temp_free(tmp2);
                            if (op == 28)
                                tcg_gen_not_i32(tmp, tmp);
                            break;
                        case 26: /* Float VCEQ #0 */
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
                            tcg_temp_free(tmp2);
                            break;
                        case 30: /* Float VABS */
                            gen_vfp_abs(0);
                            break;
                        case 31: /* Float VNEG */
                            gen_vfp_neg(0);
                            break;
                        case 32: /* VSWP */
                            tmp2 = neon_load_reg(rd, pass);
                            neon_store_reg(rm, pass, tmp2);
                            break;
                        case 33: /* VTRN */
                            tmp2 = neon_load_reg(rd, pass);
                            switch (size) {
                            case 0: gen_neon_trn_u8(tmp, tmp2); break;
                            case 1: gen_neon_trn_u16(tmp, tmp2); break;
                            case 2: abort();
                            default: return 1;
                            }
                            neon_store_reg(rm, pass, tmp2);
                            break;
                        case 56: /* Integer VRECPE */
                            gen_helper_recpe_u32(tmp, tmp, cpu_env);
                            break;
                        case 57: /* Integer VRSQRTE */
                            gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
                            break;
                        case 58: /* Float VRECPE */
                            gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
                            break;
                        case 59: /* Float VRSQRTE */
                            gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
                            break;
                        case 60: /* VCVT.F32.S32 */
                            gen_vfp_sito(0);
                            break;
                        case 61: /* VCVT.F32.U32 */
                            gen_vfp_uito(0);
                            break;
                        case 62: /* VCVT.S32.F32 */
                            gen_vfp_tosiz(0);
                            break;
                        case 63: /* VCVT.U32.F32 */
                            gen_vfp_touiz(0);
                            break;
                        default:
                            /* Reserved: 21, 29, 39-55 */
                            return 1;
                        }
                        if (op == 30 || op == 31 || op >= 58) {
                            tcg_gen_st_f32(cpu_F0s, cpu_env,
                                           neon_reg_offset(rd, pass));
                        } else {
                            neon_store_reg(rd, pass, tmp);
                        }
                    }
                    break;
                }
            } else if ((insn & (1 << 10)) == 0) {
                /* VTBL, VTBX.  */
                n = ((insn >> 5) & 0x18) + 8;
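                /* n is the table length in bytes (8 * (len + 1)).  Bit 6
                   selects VTBX, which leaves the destination unchanged for
                   out-of-range indices; VTBL writes zero there, which is
                   why a zeroed operand is passed instead.  */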
                if (insn & (1 << 6)) {
                    tmp = neon_load_reg(rd, 0);
                } else {
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, 0);
                }
                tmp2 = neon_load_reg(rm, 0);
                tmp4 = tcg_const_i32(rn);
                tmp5 = tcg_const_i32(n);
                gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
                dead_tmp(tmp);
                if (insn & (1 << 6)) {
                    tmp = neon_load_reg(rd, 1);
                } else {
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, 0);
                }
                tmp3 = neon_load_reg(rm, 1);
                gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
                tcg_temp_free_i32(tmp5);
                tcg_temp_free_i32(tmp4);
                neon_store_reg(rd, 0, tmp2);
                neon_store_reg(rd, 1, tmp3);
                dead_tmp(tmp);
            } else if ((insn & 0x380) == 0) {
                /* VDUP */
                if (insn & (1 << 19)) {
                    tmp = neon_load_reg(rm, 1);
                } else {
                    tmp = neon_load_reg(rm, 0);
                }
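                /* Narrow the selection to the element: bit 16 selects a
                   byte, bit 17 a halfword; otherwise a whole word is
                   duplicated.  */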
                if (insn & (1 << 16)) {
                    gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
                } else if (insn & (1 << 17)) {
                    if ((insn >> 18) & 1)
                        gen_neon_dup_high16(tmp);
                    else
                        gen_neon_dup_low16(tmp);
                }
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tmp2 = new_tmp();
                    tcg_gen_mov_i32(tmp2, tmp);
                    neon_store_reg(rd, pass, tmp2);
                }
                dead_tmp(tmp);
            } else {
                return 1;
            }
        }
    }
    return 0;
}

static int disas_cp14_read(CPUState *env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

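    /* Only the ThumbEE registers TEECR and TEEHBR are implemented;
       any other cp14 read is reported and treated as UNDEFINED.  */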
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            /* TEECR */
            if (IS_USER(s))
                return 1;
            tmp = load_cpu_field(teecr);
            store_reg(s, rt, tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            /* TEEHBR */
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_cpu_field(teehbr);
            store_reg(s, rt, tmp);
            return 0;
        }
    }
    fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}

static int disas_cp14_write(CPUState *env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            /* TEECR */
            if (IS_USER(s))
                return 1;
            tmp = load_reg(s, rt);
            gen_helper_set_teecr(cpu_env, tmp);
            dead_tmp(tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            /* TEEHBR */
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_reg(s, rt);
            store_cpu_field(tmp, teehbr);
            return 0;
        }
    }
    fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}

static int disas_coproc_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int cpnum;

    cpnum = (insn >> 8) & 0xf;
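    /* On XScale, cp15.c15_cpar gates access to coprocessors 0-13; a
       clear bit means the access is UNDEFINED.  */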
    if (arm_feature(env, ARM_FEATURE_XSCALE)
            && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
        return 1;

    switch (cpnum) {
    case 0:
    case 1:
        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(env, s, insn);
        } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(env, s, insn);
        }
        return 1;
    case 10:
    case 11:
        return disas_vfp_insn (env, s, insn);
    case 14:
        /* Coprocessors 7-15 are architecturally reserved by ARM.
           Unfortunately Intel decided to ignore this.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            goto board;
        if (insn & (1 << 20))
            return disas_cp14_read(env, s, insn);
        else
            return disas_cp14_write(env, s, insn);
    case 15:
        return disas_cp15_insn (env, s, insn);
    default:
    board:
        /* Unknown coprocessor.  See if the board has hooked it.  */
        return disas_cp_insn (env, s, insn);
    }
}
5850

    
5851

    
5852
/* Store a 64-bit value to a register pair.  Clobbers val.  */
5853
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5854
{
5855
    TCGv tmp;
5856
    tmp = new_tmp();
5857
    tcg_gen_trunc_i64_i32(tmp, val);
5858
    store_reg(s, rlow, tmp);
5859
    tmp = new_tmp();
5860
    tcg_gen_shri_i64(val, val, 32);
5861
    tcg_gen_trunc_i64_i32(tmp, val);
5862
    store_reg(s, rhigh, tmp);
5863
}
5864

    
5865
/* load a 32-bit value from a register and perform a 64-bit accumulate.  */
5866
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5867
{
5868
    TCGv_i64 tmp;
5869
    TCGv tmp2;
5870

    
5871
    /* Load value and extend to 64 bits.  */
5872
    tmp = tcg_temp_new_i64();
5873
    tmp2 = load_reg(s, rlow);
5874
    tcg_gen_extu_i32_i64(tmp, tmp2);
5875
    dead_tmp(tmp2);
5876
    tcg_gen_add_i64(val, val, tmp);
5877
    tcg_temp_free_i64(tmp);
5878
}
5879

    
5880
/* load and add a 64-bit value from a register pair.  */
5881
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5882
{
5883
    TCGv_i64 tmp;
5884
    TCGv tmpl;
5885
    TCGv tmph;
5886

    
5887
    /* Load 64-bit value rd:rn.  */
5888
    tmpl = load_reg(s, rlow);
5889
    tmph = load_reg(s, rhigh);
5890
    tmp = tcg_temp_new_i64();
5891
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5892
    dead_tmp(tmpl);
5893
    dead_tmp(tmph);
5894
    tcg_gen_add_i64(val, val, tmp);
5895
    tcg_temp_free_i64(tmp);
5896
}
5897

    
5898
/* Set N and Z flags from a 64-bit value.  */
5899
static void gen_logicq_cc(TCGv_i64 val)
5900
{
5901
    TCGv tmp = new_tmp();
5902
    gen_helper_logicq_cc(tmp, val);
5903
    gen_logic_CC(tmp);
5904
    dead_tmp(tmp);
5905
}
5906

    
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    TCGv tmp;
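
    /* Record the loaded value(s) and the address in the
       exclusive-monitor globals; gen_store_exclusive checks the
       store against them.  */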
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        TCGv tmp2 = new_tmp();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        dead_tmp(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}

static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}

#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
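    /* Pack the operands for the usermode STREX exception handler:
       bits [3:0] hold the size, [7:4] Rd, [11:8] Rt and
       [15:12] Rt2.  */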
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 4);
    gen_exception(EXCP_STREX);
    s->is_jmp = DISAS_JUMP;
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    TCGv tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    dead_tmp(tmp);
    if (size == 3) {
        TCGv tmp2 = new_tmp();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        dead_tmp(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        dead_tmp(tmp);
    }
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_st32(tmp, addr, IS_USER(s));
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#endif

static void disas_arm_insn(CPUState * env, DisasContext *s)
{
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
    TCGv tmp;
    TCGv tmp2;
    TCGv tmp3;
    TCGv addr;
    TCGv_i64 tmp64;

    insn = ldl_code(s->pc);
    s->pc += 4;

    /* M variants do not implement ARM mode.  */
    if (IS_M(env))
        goto illegal_op;
    cond = insn >> 28;
    if (cond == 0xf) {
        /* Unconditional instructions.  */
        if (((insn >> 25) & 7) == 1) {
            /* NEON Data processing.  */
            if (!arm_feature(env, ARM_FEATURE_NEON))
                goto illegal_op;

            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
            return;
        }
        if ((insn & 0x0f100000) == 0x04000000) {
            /* NEON load/store.  */
            if (!arm_feature(env, ARM_FEATURE_NEON))
                goto illegal_op;

            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            return;
        }
        if ((insn & 0x0d70f000) == 0x0550f000)
            return; /* PLD */
        else if ((insn & 0x0ffffdff) == 0x01010000) {
            ARCH(6);
            /* setend */
            if (insn & (1 << 9)) {
                /* BE8 mode not implemented.  */
                goto illegal_op;
            }
            return;
        } else if ((insn & 0x0fffff00) == 0x057ff000) {
            switch ((insn >> 4) & 0xf) {
            case 1: /* clrex */
                ARCH(6K);
                gen_clrex(s);
                return;
            case 4: /* dsb */
            case 5: /* dmb */
            case 6: /* isb */
                ARCH(7);
                /* We don't emulate caches so these are a no-op.  */
                return;
            default:
                goto illegal_op;
            }
        } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
            /* srs */
            int32_t offset;
            if (IS_USER(s))
                goto illegal_op;
            ARCH(6);
            op1 = (insn & 0x1f);
            if (op1 == (env->uncached_cpsr & CPSR_M)) {
                addr = load_reg(s, 13);
            } else {
                addr = new_tmp();
                tmp = tcg_const_i32(op1);
                gen_helper_get_r13_banked(addr, cpu_env, tmp);
                tcg_temp_free_i32(tmp);
            }
            i = (insn >> 23) & 3;
            switch (i) {
            case 0: offset = -4; break; /* DA */
            case 1: offset = 0; break; /* IA */
            case 2: offset = -8; break; /* DB */
            case 3: offset = 4; break; /* IB */
            default: abort();
            }
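            /* LR and the SPSR are stored as a two-word block; the
               DA/IA/DB/IB offset above positions addr at the lower
               word of that block.  */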
            if (offset)
                tcg_gen_addi_i32(addr, addr, offset);
            tmp = load_reg(s, 14);
            gen_st32(tmp, addr, 0);
            tmp = load_cpu_field(spsr);
            tcg_gen_addi_i32(addr, addr, 4);
            gen_st32(tmp, addr, 0);
            if (insn & (1 << 21)) {
                /* Base writeback.  */
                switch (i) {
                case 0: offset = -8; break;
                case 1: offset = 4; break;
                case 2: offset = -4; break;
                case 3: offset = 0; break;
                default: abort();
                }
                if (offset)
                    tcg_gen_addi_i32(addr, addr, offset);
                if (op1 == (env->uncached_cpsr & CPSR_M)) {
                    store_reg(s, 13, addr);
                } else {
                    tmp = tcg_const_i32(op1);
                    gen_helper_set_r13_banked(cpu_env, tmp, addr);
                    tcg_temp_free_i32(tmp);
                    dead_tmp(addr);
                }
            } else {
                dead_tmp(addr);
            }
            return;
        } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
            /* rfe */
            int32_t offset;
            if (IS_USER(s))
                goto illegal_op;
            ARCH(6);
            rn = (insn >> 16) & 0xf;
            addr = load_reg(s, rn);
            i = (insn >> 23) & 3;
            switch (i) {
            case 0: offset = -4; break; /* DA */
            case 1: offset = 0; break; /* IA */
            case 2: offset = -8; break; /* DB */
            case 3: offset = 4; break; /* IB */
            default: abort();
            }
            if (offset)
                tcg_gen_addi_i32(addr, addr, offset);
            /* Load PC into tmp and CPSR into tmp2.  */
            tmp = gen_ld32(addr, 0);
            tcg_gen_addi_i32(addr, addr, 4);
            tmp2 = gen_ld32(addr, 0);
            if (insn & (1 << 21)) {
                /* Base writeback.  */
                switch (i) {
                case 0: offset = -8; break;
                case 1: offset = 4; break;
                case 2: offset = -4; break;
                case 3: offset = 0; break;
                default: abort();
                }
                if (offset)
                    tcg_gen_addi_i32(addr, addr, offset);
                store_reg(s, rn, addr);
            } else {
                dead_tmp(addr);
            }
            gen_rfe(s, tmp, tmp2);
            return;
        } else if ((insn & 0x0e000000) == 0x0a000000) {
            /* branch link and change to thumb (blx <offset>) */
            int32_t offset;

            val = (uint32_t)s->pc;
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, val);
            store_reg(s, 14, tmp);
            /* Sign-extend the 24-bit offset */
            offset = (((int32_t)insn) << 8) >> 8;
            /* offset * 4 + bit24 * 2 + (thumb bit) */
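            /* Bit 24 is the BLX 'H' bit supplying the halfword part
               of the offset; setting bit 0 makes gen_bx_im switch to
               Thumb state.  */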
            val += (offset << 2) | ((insn >> 23) & 2) | 1;
            /* pipeline offset */
            val += 4;
            gen_bx_im(s, val);
            return;
        } else if ((insn & 0x0e000f00) == 0x0c000100) {
            if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
                /* iWMMXt register transfer.  */
                if (env->cp15.c15_cpar & (1 << 1))
                    if (!disas_iwmmxt_insn(env, s, insn))
                        return;
            }
        } else if ((insn & 0x0fe00000) == 0x0c400000) {
            /* Coprocessor double register transfer.  */
        } else if ((insn & 0x0f000010) == 0x0e000010) {
            /* Additional coprocessor register transfer.  */
        } else if ((insn & 0x0ff10020) == 0x01000000) {
            uint32_t mask;
            uint32_t val;
            /* cps (privileged) */
            if (IS_USER(s))
                return;
            mask = val = 0;
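            /* Bit 19 enables the A/I/F interrupt-mask update (bit 18
               selects set or clear), and bit 17 adds a mode change to
               the CPSR write.  */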
            if (insn & (1 << 19)) {
                if (insn & (1 << 8))
                    mask |= CPSR_A;
                if (insn & (1 << 7))
                    mask |= CPSR_I;
                if (insn & (1 << 6))
                    mask |= CPSR_F;
                if (insn & (1 << 18))
                    val |= mask;
            }
            if (insn & (1 << 17)) {
                mask |= CPSR_M;
                val |= (insn & 0x1f);
            }
            if (mask) {
                gen_set_psr_im(s, mask, 0, val);
            }
            return;
        }
        goto illegal_op;
    }
    if (cond != 0xe) {
        /* If not always executed, generate a conditional jump to the
           next instruction.  */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;
    }
    if ((insn & 0x0f900000) == 0x03000000) {
        if ((insn & (1 << 21)) == 0) {
            ARCH(6T2);
            rd = (insn >> 12) & 0xf;
            val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
            if ((insn & (1 << 22)) == 0) {
                /* MOVW */
                tmp = new_tmp();
                tcg_gen_movi_i32(tmp, val);
            } else {
                /* MOVT */
                tmp = load_reg(s, rd);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_ori_i32(tmp, tmp, val << 16);
            }
            store_reg(s, rd, tmp);
        } else {
            if (((insn >> 12) & 0xf) != 0xf)
                goto illegal_op;
            if (((insn >> 16) & 0xf) == 0) {
                gen_nop_hint(s, insn & 0xff);
            } else {
                /* CPSR = immediate */
                val = insn & 0xff;
                shift = ((insn >> 8) & 0xf) * 2;
                if (shift)
                    val = (val >> shift) | (val << (32 - shift));
                i = ((insn & (1 << 22)) != 0);
                if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
                    goto illegal_op;
            }
        }
    } else if ((insn & 0x0f900000) == 0x01000000
               && (insn & 0x00000090) != 0x00000090) {
        /* miscellaneous instructions */
        op1 = (insn >> 21) & 3;
        sh = (insn >> 4) & 0xf;
        rm = insn & 0xf;
        switch (sh) {
        case 0x0: /* move program status register */
            if (op1 & 1) {
                /* PSR = reg */
                tmp = load_reg(s, rm);
                i = ((op1 & 2) != 0);
                if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
                    goto illegal_op;
            } else {
                /* reg = PSR */
                rd = (insn >> 12) & 0xf;
                if (op1 & 2) {
                    if (IS_USER(s))
                        goto illegal_op;
                    tmp = load_cpu_field(spsr);
                } else {
                    tmp = new_tmp();
                    gen_helper_cpsr_read(tmp);
                }
                store_reg(s, rd, tmp);
            }
            break;
        case 0x1:
            if (op1 == 1) {
                /* branch/exchange thumb (bx).  */
                tmp = load_reg(s, rm);
                gen_bx(s, tmp);
            } else if (op1 == 3) {
                /* clz */
                rd = (insn >> 12) & 0xf;
                tmp = load_reg(s, rm);
                gen_helper_clz(tmp, tmp);
                store_reg(s, rd, tmp);
            } else {
                goto illegal_op;
            }
            break;
        case 0x2:
            if (op1 == 1) {
                ARCH(5J); /* bxj */
                /* Trivial implementation equivalent to bx.  */
                tmp = load_reg(s, rm);
                gen_bx(s, tmp);
            } else {
                goto illegal_op;
            }
            break;
        case 0x3:
            if (op1 != 1)
                goto illegal_op;

            /* branch link/exchange thumb (blx) */
            tmp = load_reg(s, rm);
            tmp2 = new_tmp();
            tcg_gen_movi_i32(tmp2, s->pc);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            break;
        case 0x5: /* saturating add/subtract */
            rd = (insn >> 12) & 0xf;
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rn);
            if (op1 & 2)
                gen_helper_double_saturate(tmp2, tmp2);
            if (op1 & 1)
                gen_helper_sub_saturate(tmp, tmp, tmp2);
            else
                gen_helper_add_saturate(tmp, tmp, tmp2);
            dead_tmp(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 7:
            /* SMC instruction (op1 == 3)
               and undefined instructions (op1 == 0 || op1 == 2)
               will trap */
            if (op1 != 1) {
                goto illegal_op;
            }
            /* bkpt */
            gen_set_condexec(s);
            gen_set_pc_im(s->pc - 4);
            gen_exception(EXCP_BKPT);
            s->is_jmp = DISAS_JUMP;
            break;
        case 0x8: /* signed multiply */
        case 0xa:
        case 0xc:
        case 0xe:
            rs = (insn >> 8) & 0xf;
            rn = (insn >> 12) & 0xf;
            rd = (insn >> 16) & 0xf;
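            /* op1 == 1: SMULW<y>/SMLAW<y> ((32 x 16) >> 16); otherwise
               16 x 16 multiplies: SMLA<x><y> (op1 == 0),
               SMLAL<x><y> (op1 == 2) or SMUL<x><y> (op1 == 3).  */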
            if (op1 == 1) {
                /* (32 * 16) >> 16 */
                tmp = load_reg(s, rm);
                tmp2 = load_reg(s, rs);
                if (sh & 4)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = new_tmp();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if ((sh & 2) == 0) {
                    tmp2 = load_reg(s, rn);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    dead_tmp(tmp2);
                }
                store_reg(s, rd, tmp);
            } else {
                /* 16 * 16 */
                tmp = load_reg(s, rm);
                tmp2 = load_reg(s, rs);
                gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
                dead_tmp(tmp2);
                if (op1 == 2) {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ext_i32_i64(tmp64, tmp);
                    dead_tmp(tmp);
                    gen_addq(s, tmp64, rn, rd);
                    gen_storeq_reg(s, rn, rd, tmp64);
                    tcg_temp_free_i64(tmp64);
                } else {
                    if (op1 == 0) {
                        tmp2 = load_reg(s, rn);
                        gen_helper_add_setq(tmp, tmp, tmp2);
                        dead_tmp(tmp2);
                    }
                    store_reg(s, rd, tmp);
                }
            }
            break;
        default:
            goto illegal_op;
        }
    } else if (((insn & 0x0e000000) == 0 &&
                (insn & 0x00000090) != 0x90) ||
               ((insn & 0x0e000000) == (1 << 25))) {
        int set_cc, logic_cc, shiftop;

        op1 = (insn >> 21) & 0xf;
        set_cc = (insn >> 20) & 1;
        logic_cc = table_logic_cc[op1] & set_cc;

        /* data processing instruction */
        if (insn & (1 << 25)) {
            /* immediate operand */
            val = insn & 0xff;
            shift = ((insn >> 8) & 0xf) * 2;
            if (shift) {
                val = (val >> shift) | (val << (32 - shift));
            }
            tmp2 = new_tmp();
            tcg_gen_movi_i32(tmp2, val);
            if (logic_cc && shift) {
                gen_set_CF_bit31(tmp2);
            }
        } else {
            /* register */
            rm = (insn) & 0xf;
            tmp2 = load_reg(s, rm);
            shiftop = (insn >> 5) & 3;
            if (!(insn & (1 << 4))) {
                shift = (insn >> 7) & 0x1f;
                gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            } else {
                rs = (insn >> 8) & 0xf;
                tmp = load_reg(s, rs);
                gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
            }
        }
        if (op1 != 0x0f && op1 != 0x0d) {
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rn);
        } else {
            TCGV_UNUSED(tmp);
        }
        rd = (insn >> 12) & 0xf;
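        /* op1 is the standard ARM data-processing opcode:
           0 AND, 1 EOR, 2 SUB, 3 RSB, 4 ADD, 5 ADC, 6 SBC, 7 RSC,
           8 TST, 9 TEQ, a CMP, b CMN, c ORR, d MOV, e BIC, f MVN.  */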
        switch(op1) {
        case 0x00:
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x01:
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x02:
            if (set_cc && rd == 15) {
                /* SUBS r15, ... is used for exception return.  */
                if (IS_USER(s)) {
                    goto illegal_op;
                }
                gen_helper_sub_cc(tmp, tmp, tmp2);
                gen_exception_return(s, tmp);
            } else {
                if (set_cc) {
                    gen_helper_sub_cc(tmp, tmp, tmp2);
                } else {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                }
                store_reg_bx(env, s, rd, tmp);
            }
            break;
        case 0x03:
            if (set_cc) {
                gen_helper_sub_cc(tmp, tmp2, tmp);
            } else {
                tcg_gen_sub_i32(tmp, tmp2, tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x04:
            if (set_cc) {
                gen_helper_add_cc(tmp, tmp, tmp2);
            } else {
                tcg_gen_add_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x05:
            if (set_cc) {
                gen_helper_adc_cc(tmp, tmp, tmp2);
            } else {
                gen_add_carry(tmp, tmp, tmp2);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x06:
            if (set_cc) {
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            } else {
                gen_sub_carry(tmp, tmp, tmp2);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x07:
            if (set_cc) {
                gen_helper_sbc_cc(tmp, tmp2, tmp);
            } else {
                gen_sub_carry(tmp, tmp2, tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x08:
            if (set_cc) {
                tcg_gen_and_i32(tmp, tmp, tmp2);
                gen_logic_CC(tmp);
            }
            dead_tmp(tmp);
            break;
        case 0x09:
            if (set_cc) {
                tcg_gen_xor_i32(tmp, tmp, tmp2);
                gen_logic_CC(tmp);
            }
            dead_tmp(tmp);
            break;
        case 0x0a:
            if (set_cc) {
                gen_helper_sub_cc(tmp, tmp, tmp2);
            }
            dead_tmp(tmp);
            break;
        case 0x0b:
            if (set_cc) {
                gen_helper_add_cc(tmp, tmp, tmp2);
            }
            dead_tmp(tmp);
            break;
        case 0x0c:
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x0d:
            if (logic_cc && rd == 15) {
                /* MOVS r15, ... is used for exception return.  */
                if (IS_USER(s)) {
                    goto illegal_op;
                }
                gen_exception_return(s, tmp2);
            } else {
                if (logic_cc) {
                    gen_logic_CC(tmp2);
                }
                store_reg_bx(env, s, rd, tmp2);
            }
            break;
        case 0x0e:
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        default:
        case 0x0f:
            tcg_gen_not_i32(tmp2, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(env, s, rd, tmp2);
            break;
        }
        if (op1 != 0x0f && op1 != 0x0d) {
            dead_tmp(tmp2);
        }
    } else {
        /* other instructions */
        op1 = (insn >> 24) & 0xf;
        switch(op1) {
        case 0x0:
        case 0x1:
            /* multiplies, extra load/stores */
            sh = (insn >> 5) & 3;
            if (sh == 0) {
                if (op1 == 0x0) {
                    rd = (insn >> 16) & 0xf;
                    rn = (insn >> 12) & 0xf;
                    rs = (insn >> 8) & 0xf;
                    rm = (insn) & 0xf;
                    op1 = (insn >> 20) & 0xf;
                    switch (op1) {
                    case 0: case 1: case 2: case 3: case 6:
                        /* 32 bit mul */
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        tcg_gen_mul_i32(tmp, tmp, tmp2);
                        dead_tmp(tmp2);
                        if (insn & (1 << 22)) {
                            /* Subtract (mls) */
                            ARCH(6T2);
                            tmp2 = load_reg(s, rn);
                            tcg_gen_sub_i32(tmp, tmp2, tmp);
                            dead_tmp(tmp2);
                        } else if (insn & (1 << 21)) {
                            /* Add */
                            tmp2 = load_reg(s, rn);
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                            dead_tmp(tmp2);
                        }
                        if (insn & (1 << 20))
                            gen_logic_CC(tmp);
                        store_reg(s, rd, tmp);
                        break;
                    default:
                        /* 64 bit mul */
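                        /* UMULL/SMULL and friends: bit 22 selects a
                           signed multiply, bit 21 accumulates into
                           rd:rn (UMLAL/SMLAL), and a clear bit 23 is
                           UMAAL, which accumulates both halves.  */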
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        if (insn & (1 << 22))
                            tmp64 = gen_muls_i64_i32(tmp, tmp2);
                        else
                            tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                        if (insn & (1 << 21)) /* mult accumulate */
                            gen_addq(s, tmp64, rn, rd);
                        if (!(insn & (1 << 23))) { /* double accumulate */
                            ARCH(6);
                            gen_addq_lo(s, tmp64, rn);
                            gen_addq_lo(s, tmp64, rd);
                        }
                        if (insn & (1 << 20))
                            gen_logicq_cc(tmp64);
                        gen_storeq_reg(s, rn, rd, tmp64);
                        tcg_temp_free_i64(tmp64);
                        break;
                    }
                } else {
                    rn = (insn >> 16) & 0xf;
                    rd = (insn >> 12) & 0xf;
                    if (insn & (1 << 23)) {
                        /* load/store exclusive */
                        op1 = (insn >> 21) & 0x3;
                        if (op1)
                            ARCH(6K);
                        else
                            ARCH(6);
                        addr = tcg_temp_local_new_i32();
                        load_reg_var(s, addr, rn);
                        if (insn & (1 << 20)) {
                            switch (op1) {
                            case 0: /* ldrex */
                                gen_load_exclusive(s, rd, 15, addr, 2);
                                break;
                            case 1: /* ldrexd */
                                gen_load_exclusive(s, rd, rd + 1, addr, 3);
                                break;
                            case 2: /* ldrexb */
                                gen_load_exclusive(s, rd, 15, addr, 0);
                                break;
                            case 3: /* ldrexh */
                                gen_load_exclusive(s, rd, 15, addr, 1);
                                break;
                            default:
                                abort();
                            }
                        } else {
                            rm = insn & 0xf;
                            switch (op1) {
                            case 0:  /*  strex */
                                gen_store_exclusive(s, rd, rm, 15, addr, 2);
                                break;
                            case 1: /*  strexd */
                                gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
                                break;
                            case 2: /*  strexb */
                                gen_store_exclusive(s, rd, rm, 15, addr, 0);
                                break;
                            case 3: /* strexh */
                                gen_store_exclusive(s, rd, rm, 15, addr, 1);
                                break;
                            default:
                                abort();
                            }
                        }
                        tcg_temp_free(addr);
                    } else {
                        /* SWP instruction */
                        rm = (insn) & 0xf;

                        /* ??? This is not really atomic.  However we know
                           we never have multiple CPUs running in parallel,
                           so it is good enough.  */
                        addr = load_reg(s, rn);
                        tmp = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            tmp2 = gen_ld8u(addr, IS_USER(s));
                            gen_st8(tmp, addr, IS_USER(s));
                        } else {
                            tmp2 = gen_ld32(addr, IS_USER(s));
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        dead_tmp(addr);
                        store_reg(s, rd, tmp2);
                    }
                }
            } else {
                int address_offset;
                int load;
                /* Misc load/store */
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                addr = load_reg(s, rn);
                if (insn & (1 << 24))
                    gen_add_datah_offset(s, insn, 0, addr);
                address_offset = 0;
                if (insn & (1 << 20)) {
                    /* load */
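                    /* sh selects the width: 1 LDRH, 2 LDRSB,
                       3 LDRSH.  */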
                    switch(sh) {
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld8s(addr, IS_USER(s));
                        break;
                    default:
                    case 3:
                        tmp = gen_ld16s(addr, IS_USER(s));
                        break;
                    }
                    load = 1;
                } else if (sh & 2) {
                    /* doubleword */
                    if (sh & 1) {
                        /* store */
                        tmp = load_reg(s, rd);
                        gen_st32(tmp, addr, IS_USER(s));
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = load_reg(s, rd + 1);
                        gen_st32(tmp, addr, IS_USER(s));
                        load = 0;
                    } else {
                        /* load */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, rd, tmp);
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = gen_ld32(addr, IS_USER(s));
                        rd++;
                        load = 1;
                    }
                    address_offset = -4;
                } else {
                    /* store */
                    tmp = load_reg(s, rd);
                    gen_st16(tmp, addr, IS_USER(s));
                    load = 0;
                }
                /* Perform base writeback before the loaded value to
                   ensure correct behavior with overlapping index registers.
                   ldrd with base writeback is undefined if the
                   destination and index registers overlap.  */
                if (!(insn & (1 << 24))) {
                    gen_add_datah_offset(s, insn, address_offset, addr);
                    store_reg(s, rn, addr);
                } else if (insn & (1 << 21)) {
                    if (address_offset)
                        tcg_gen_addi_i32(addr, addr, address_offset);
                    store_reg(s, rn, addr);
                } else {
                    dead_tmp(addr);
                }
                if (load) {
                    /* Complete the load.  */
                    store_reg(s, rd, tmp);
                }
            }
            break;
        case 0x4:
        case 0x5:
            goto do_ldst;
        case 0x6:
        case 0x7:
            if (insn & (1 << 4)) {
                ARCH(6);
                /* ARMv6 media instructions.  */
                rm = insn & 0xf;
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                rs = (insn >> 8) & 0xf;
                switch ((insn >> 23) & 3) {
                case 0: /* Parallel add/subtract.  */
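                    /* op1 selects the variant (signed, saturating,
                       halving, unsigned, ...) and sh the individual
                       add/subtract operation; the helper does the
                       per-lane work.  */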
                    op1 = (insn >> 20) & 7;
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    sh = (insn >> 5) & 7;
                    if ((op1 & 3) == 0 || sh == 5 || sh == 6)
                        goto illegal_op;
                    gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
                    dead_tmp(tmp2);
                    store_reg(s, rd, tmp);
                    break;
                case 1:
                    if ((insn & 0x00700020) == 0) {
                        /* Halfword pack.  */
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            /* pkhtb */
                            if (shift == 0)
                                shift = 31;
                            tcg_gen_sari_i32(tmp2, tmp2, shift);
                            tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                            tcg_gen_ext16u_i32(tmp2, tmp2);
                        } else {
                            /* pkhbt */
                            if (shift)
                                tcg_gen_shli_i32(tmp2, tmp2, shift);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        }
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        dead_tmp(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00200020) == 0x00200000) {
                        /* [us]sat */
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            if (shift == 0)
                                shift = 31;
                            tcg_gen_sari_i32(tmp, tmp, shift);
                        } else {
                            tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        sh = (insn >> 16) & 0x1f;
                        if (sh != 0) {
                            tmp2 = tcg_const_i32(sh);
                            if (insn & (1 << 22))
                                gen_helper_usat(tmp, tmp, tmp2);
                            else
                                gen_helper_ssat(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00300fe0) == 0x00200f20) {
                        /* [us]sat16 */
                        tmp = load_reg(s, rm);
                        sh = (insn >> 16) & 0x1f;
                        if (sh != 0) {
                            tmp2 = tcg_const_i32(sh);
                            if (insn & (1 << 22))
                                gen_helper_usat16(tmp, tmp, tmp2);
                            else
                                gen_helper_ssat16(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00700fe0) == 0x00000fa0) {
                        /* Select bytes.  */
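                        /* SEL: each result byte is taken from Rn or Rm
                           according to the GE flags left by an earlier
                           parallel add/subtract.  */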
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        tmp3 = new_tmp();
                        tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
                        gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                        dead_tmp(tmp3);
                        dead_tmp(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x000003e0) == 0x00000060) {
                        tmp = load_reg(s, rm);
                        shift = (insn >> 10) & 3;
                        /* ??? In many cases it's not necessary to do a
                           rotate, a shift is sufficient.  */
                        if (shift != 0)
                            tcg_gen_rotri_i32(tmp, tmp, shift * 8);
                        op1 = (insn >> 20) & 7;
                        switch (op1) {
                        case 0: gen_sxtb16(tmp);  break;
                        case 2: gen_sxtb(tmp);    break;
                        case 3: gen_sxth(tmp);    break;
                        case 4: gen_uxtb16(tmp);  break;
                        case 6: gen_uxtb(tmp);    break;
                        case 7: gen_uxth(tmp);    break;
                        default: goto illegal_op;
                        }
                        if (rn != 15) {
                            tmp2 = load_reg(s, rn);
                            if ((op1 & 3) == 0) {
                                gen_add16(tmp, tmp2);
                            } else {
                                tcg_gen_add_i32(tmp, tmp, tmp2);
                                dead_tmp(tmp2);
                            }
                        }
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x003f0f60) == 0x003f0f20) {
                        /* rev */
                        tmp = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            if (insn & (1 << 7)) {
                                gen_revsh(tmp);
                            } else {
                                ARCH(6T2);
                                gen_helper_rbit(tmp, tmp);
                            }
                        } else {
                            if (insn & (1 << 7))
                                gen_rev16(tmp);
                            else
                                tcg_gen_bswap32_i32(tmp, tmp);
                        }
                        store_reg(s, rd, tmp);
                    } else {
                        goto illegal_op;
                    }
                    break;
                case 2: /* Multiplies (Type 3).  */
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rs);
                    if (insn & (1 << 20)) {
                        /* Signed multiply most significant [accumulate].  */
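                        /* Bit 5 is the 'R' bit: adding 0x80000000
                           before taking the high word rounds the
                           result instead of truncating (SMMULR,
                           SMMLAR, SMMLSR).  */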
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                        if (insn & (1 << 5))
                            tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                        tcg_gen_shri_i64(tmp64, tmp64, 32);
                        tmp = new_tmp();
                        tcg_gen_trunc_i64_i32(tmp, tmp64);
                        tcg_temp_free_i64(tmp64);
                        if (rd != 15) {
                            tmp2 = load_reg(s, rd);
                            if (insn & (1 << 6)) {
                                tcg_gen_sub_i32(tmp, tmp, tmp2);
                            } else {
                                tcg_gen_add_i32(tmp, tmp, tmp2);
                            }
                            dead_tmp(tmp2);
                        }
                        store_reg(s, rn, tmp);
                    } else {
                        if (insn & (1 << 5))
                            gen_swap_half(tmp2);
                        gen_smul_dual(tmp, tmp2);
                        /* This addition cannot overflow.  */
                        if (insn & (1 << 6)) {
                            tcg_gen_sub_i32(tmp, tmp, tmp2);
                        } else {
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                        }
                        dead_tmp(tmp2);
                        if (insn & (1 << 22)) {
                            /* smlald, smlsld */
                            tmp64 = tcg_temp_new_i64();
                            tcg_gen_ext_i32_i64(tmp64, tmp);
                            dead_tmp(tmp);
                            gen_addq(s, tmp64, rd, rn);
                            gen_storeq_reg(s, rd, rn, tmp64);
                            tcg_temp_free_i64(tmp64);
                        } else {
                            /* smuad, smusd, smlad, smlsd */
                            if (rd != 15) {
                                tmp2 = load_reg(s, rd);
                                gen_helper_add_setq(tmp, tmp, tmp2);
                                dead_tmp(tmp2);
                            }
                            store_reg(s, rn, tmp);
                        }
                    }
                    break;
                case 3:
                    op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
                    switch (op1) {
                    case 0: /* Unsigned sum of absolute differences.  */
                        ARCH(6);
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        gen_helper_usad8(tmp, tmp, tmp2);
                        dead_tmp(tmp2);
                        if (rd != 15) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                            dead_tmp(tmp2);
                        }
                        store_reg(s, rn, tmp);
                        break;
                    case 0x20: case 0x24: case 0x28: case 0x2c:
                        /* Bitfield insert/clear.  */
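                        /* shift is the LSB of the field and i its
                           width; Rm == 15 encodes BFC, i.e. inserting
                           zeroes.  */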
                        ARCH(6T2);
                        shift = (insn >> 7) & 0x1f;
                        i = (insn >> 16) & 0x1f;
                        i = i + 1 - shift;
                        if (rm == 15) {
                            tmp = new_tmp();
                            tcg_gen_movi_i32(tmp, 0);
                        } else {
                            tmp = load_reg(s, rm);
                        }
                        if (i != 32) {
                            tmp2 = load_reg(s, rd);
                            gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
                            dead_tmp(tmp2);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
                    case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
                        ARCH(6T2);
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        i = ((insn >> 16) & 0x1f) + 1;
                        if (shift + i > 32)
                            goto illegal_op;
                        if (i < 32) {
                            if (op1 & 0x20) {
                                gen_ubfx(tmp, shift, (1u << i) - 1);
                            } else {
                                gen_sbfx(tmp, shift, i);
                            }
                        }
                        store_reg(s, rd, tmp);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                }
                break;
            }
        do_ldst:
            /* Check for undefined extension instructions
             * per the ARM Bible, i.e.:
             * xxxx 0111 1111 xxxx  xxxx xxxx 1111 xxxx
             */
            sh = (0xf << 20) | (0xf << 4);
            if (op1 == 0x7 && ((insn & sh) == sh)) {
                goto illegal_op;
            }
            /* load/store byte/word */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            tmp2 = load_reg(s, rn);
            i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
            if (insn & (1 << 24))
                gen_add_data_offset(s, insn, tmp2);
            if (insn & (1 << 20)) {
                /* load */
                if (insn & (1 << 22)) {
                    tmp = gen_ld8u(tmp2, i);
                } else {
                    tmp = gen_ld32(tmp2, i);
                }
            } else {
                /* store */
                tmp = load_reg(s, rd);
                if (insn & (1 << 22))
                    gen_st8(tmp, tmp2, i);
                else
                    gen_st32(tmp, tmp2, i);
            }
            if (!(insn & (1 << 24))) {
                gen_add_data_offset(s, insn, tmp2);
                store_reg(s, rn, tmp2);
            } else if (insn & (1 << 21)) {
                store_reg(s, rn, tmp2);
            } else {
                dead_tmp(tmp2);
            }
            if (insn & (1 << 20)) {
                /* Complete the load.  */
                if (rd == 15)
                    gen_bx(s, tmp);
                else
                    store_reg(s, rd, tmp);
            }
            break;
        case 0x08:
        case 0x09:
            {
                int j, n, user, loaded_base;
                TCGv loaded_var;
                /* load/store multiple words */
                /* XXX: store correct base if write back */
                user = 0;
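                /* Bit 22 selects the 'S' forms: transfer of the
                   user-mode register bank or, when the PC is loaded,
                   an exception return that also restores CPSR from
                   SPSR (handled at the end of this block).  */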
                if (insn & (1 << 22)) {
                    if (IS_USER(s))
                        goto illegal_op; /* only usable in supervisor mode */

                    if ((insn & (1 << 15)) == 0)
                        user = 1;
                }
                rn = (insn >> 16) & 0xf;
                addr = load_reg(s, rn);

                /* compute total size */
                loaded_base = 0;
                TCGV_UNUSED(loaded_var);
                n = 0;
                for(i=0;i<16;i++) {
                    if (insn & (1 << i))
                        n++;
                }
                /* XXX: test invalid n == 0 case ? */
                if (insn & (1 << 23)) {
                    if (insn & (1 << 24)) {
                        /* pre increment */
                        tcg_gen_addi_i32(addr, addr, 4);
                    } else {
                        /* post increment */
                    }
                } else {
                    if (insn & (1 << 24)) {
                        /* pre decrement */
                        tcg_gen_addi_i32(addr, addr, -(n * 4));
                    } else {
                        /* post decrement */
                        if (n != 1)
                            tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                    }
                }
                j = 0;
                for(i=0;i<16;i++) {
                    if (insn & (1 << i)) {
                        if (insn & (1 << 20)) {
                            /* load */
                            tmp = gen_ld32(addr, IS_USER(s));
                            if (i == 15) {
                                gen_bx(s, tmp);
                            } else if (user) {
                                tmp2 = tcg_const_i32(i);
                                gen_helper_set_user_reg(tmp2, tmp);
                                tcg_temp_free_i32(tmp2);
                                dead_tmp(tmp);
                            } else if (i == rn) {
                                loaded_var = tmp;
                                loaded_base = 1;
                            } else {
                                store_reg(s, i, tmp);
                            }
                        } else {
                            /* store */
                            if (i == 15) {
                                /* special case: r15 = PC + 8 */
                                val = (long)s->pc + 4;
                                tmp = new_tmp();
                                tcg_gen_movi_i32(tmp, val);
                            } else if (user) {
                                tmp = new_tmp();
                                tmp2 = tcg_const_i32(i);
                                gen_helper_get_user_reg(tmp, tmp2);
                                tcg_temp_free_i32(tmp2);
                            } else {
                                tmp = load_reg(s, i);
                            }
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        j++;
                        /* no need to add after the last transfer */
                        if (j != n)
                            tcg_gen_addi_i32(addr, addr, 4);
                    }
                }
                if (insn & (1 << 21)) {
                    /* write back */
                    if (insn & (1 << 23)) {
                        if (insn & (1 << 24)) {
                            /* pre increment */
                        } else {
                            /* post increment */
                            tcg_gen_addi_i32(addr, addr, 4);
                        }
                    } else {
                        if (insn & (1 << 24)) {
                            /* pre decrement */
                            if (n != 1)
                                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                        } else {
                            /* post decrement */
                            tcg_gen_addi_i32(addr, addr, -(n * 4));
                        }
                    }
                    store_reg(s, rn, addr);
                } else {
                    dead_tmp(addr);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if ((insn & (1 << 22)) && !user) {
                    /* Restore CPSR from SPSR.  */
                    tmp = load_cpu_field(spsr);
                    gen_set_cpsr(tmp, 0xffffffff);
                    dead_tmp(tmp);
                    s->is_jmp = DISAS_UPDATE;
                }
            }
            break;
        case 0xa:
        case 0xb:
            {
                int32_t offset;

                /* branch (and link) */
                val = (int32_t)s->pc;
                if (insn & (1 << 24)) {
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, val);
                    store_reg(s, 14, tmp);
                }
7237
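                /* imm24 is a signed word offset.  The target is relative to
                   the insn address + 8; s->pc already points 4 past the
                   insn, hence the extra + 4 below.  */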
                offset = (((int32_t)insn << 8) >> 8);
                val += (offset << 2) + 4;
                gen_jmp(s, val);
            }
            break;
        case 0xc:
        case 0xd:
        case 0xe:
            /* Coprocessor.  */
            if (disas_coproc_insn(env, s, insn))
                goto illegal_op;
            break;
        case 0xf:
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        default:
        illegal_op:
            gen_set_condexec(s);
            gen_set_pc_im(s->pc - 4);
            gen_exception(EXCP_UDEF);
            s->is_jmp = DISAS_JUMP;
            break;
        }
    }
}

/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.  */

static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
{
    int logic_cc;

    logic_cc = 0;
    switch (op) {
    case 0: /* and */
        tcg_gen_and_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 1: /* bic */
        tcg_gen_andc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 2: /* orr */
        tcg_gen_or_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 3: /* orn */
        tcg_gen_not_i32(t1, t1);
        tcg_gen_or_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 4: /* eor */
        tcg_gen_xor_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 8: /* add */
        if (conds)
            gen_helper_add_cc(t0, t0, t1);
        else
            tcg_gen_add_i32(t0, t0, t1);
        break;
    case 10: /* adc */
        if (conds)
            gen_helper_adc_cc(t0, t0, t1);
        else
            gen_adc(t0, t1);
        break;
    case 11: /* sbc */
        if (conds)
            gen_helper_sbc_cc(t0, t0, t1);
        else
            gen_sub_carry(t0, t0, t1);
        break;
    case 13: /* sub */
        if (conds)
            gen_helper_sub_cc(t0, t0, t1);
        else
            tcg_gen_sub_i32(t0, t0, t1);
        break;
    case 14: /* rsb */
        if (conds)
            gen_helper_sub_cc(t0, t1, t0);
        else
            tcg_gen_sub_i32(t0, t1, t0);
        break;
    default: /* 5, 6, 7, 9, 12, 15. */
        return 1;
    }
    if (logic_cc) {
        gen_logic_CC(t0);
        if (shifter_out)
            gen_set_CF_bit31(t1);
    }
    return 0;
}

/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
   is not legal.  */
static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
{
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;
    TCGv tmp;
    TCGv tmp2;
    TCGv tmp3;
    TCGv addr;
    TCGv_i64 tmp64;
    int op;
    int shiftop;
    int conds;
    int logic_cc;

    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
          || arm_feature (env, ARM_FEATURE_M))) {
        /* Thumb-1 cores may need to treat bl and blx as a pair of
           16-bit instructions to get correct prefetch abort behavior.  */
        insn = insn_hw1;
        if ((insn & (1 << 12)) == 0) {
            /* Second half of blx.  */
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);

            tmp2 = new_tmp();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            return 0;
        }
        if (insn & (1 << 11)) {
            /* Second half of bl.  */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);

            tmp2 = new_tmp();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            return 0;
        }
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
            /* Instruction spans a page boundary.  Implement it as two
               16-bit instructions in case the second half causes a
               prefetch abort.  */
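            /* BL prefix: hw1[10:0] is sign-extended into bits [22:12] of
               the branch offset and staged in r14 for the second half.  */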
            offset = ((int32_t)insn << 21) >> 9;
            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
            return 0;
        }
        /* Fall through to 32-bit decode.  */
    }

    insn = lduw_code(s->pc);
    s->pc += 2;
    insn |= (uint32_t)insn_hw1 << 16;

    if ((insn & 0xf800e800) != 0xf000e800) {
        ARCH(6T2);
    }

    rn = (insn >> 16) & 0xf;
    rs = (insn >> 12) & 0xf;
    rd = (insn >> 8) & 0xf;
    rm = insn & 0xf;
    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions.  Should never happen.  */
        abort();
    case 4:
        if (insn & (1 << 22)) {
            /* Other load/store, table branch.  */
            if (insn & 0x01200000) {
                /* Load/store doubleword.  */
                if (rn == 15) {
                    addr = new_tmp();
                    tcg_gen_movi_i32(addr, s->pc & ~3);
                } else {
                    addr = load_reg(s, rn);
                }
                offset = (insn & 0xff) * 4;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, offset);
                    offset = 0;
                }
                if (insn & (1 << 20)) {
                    /* ldrd */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, rs, tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, rd, tmp);
                } else {
                    /* strd */
                    tmp = load_reg(s, rs);
                    gen_st32(tmp, addr, IS_USER(s));
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = load_reg(s, rd);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                if (insn & (1 << 21)) {
                    /* Base writeback.  */
                    if (rn == 15)
                        goto illegal_op;
                    tcg_gen_addi_i32(addr, addr, offset - 4);
                    store_reg(s, rn, addr);
                } else {
                    dead_tmp(addr);
                }
            } else if ((insn & (1 << 23)) == 0) {
                /* Load/store exclusive word.  */
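                /* Word-sized access (size code 2); 15 in the second
                   register slot means no second transfer register.  */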
                addr = tcg_temp_local_new();
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, 15, addr, 2);
                } else {
                    gen_store_exclusive(s, rd, rs, 15, addr, 2);
                }
                tcg_temp_free(addr);
            } else if ((insn & (1 << 6)) == 0) {
                /* Table Branch.  */
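                /* tbb/tbh: load a byte/halfword from [Rn + Rm(*2)] and
                   branch forward by twice that value.  */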
                if (rn == 15) {
                    addr = new_tmp();
                    tcg_gen_movi_i32(addr, s->pc);
                } else {
                    addr = load_reg(s, rn);
                }
                tmp = load_reg(s, rm);
                tcg_gen_add_i32(addr, addr, tmp);
                if (insn & (1 << 4)) {
                    /* tbh */
                    tcg_gen_add_i32(addr, addr, tmp);
                    dead_tmp(tmp);
                    tmp = gen_ld16u(addr, IS_USER(s));
                } else { /* tbb */
                    dead_tmp(tmp);
                    tmp = gen_ld8u(addr, IS_USER(s));
                }
                dead_tmp(addr);
                tcg_gen_shli_i32(tmp, tmp, 1);
                tcg_gen_addi_i32(tmp, tmp, s->pc);
                store_reg(s, 15, tmp);
            } else {
                /* Load/store exclusive byte/halfword/doubleword.  */
                ARCH(7);
                op = (insn >> 4) & 0x3;
                if (op == 2) {
                    goto illegal_op;
                }
                addr = tcg_temp_local_new();
                load_reg_var(s, addr, rn);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, rd, addr, op);
                } else {
                    gen_store_exclusive(s, rm, rs, rd, addr, op);
                }
                tcg_temp_free(addr);
            }
        } else {
            /* Load/store multiple, RFE, SRS.  */
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
                /* Not available in user mode.  */
                if (IS_USER(s))
                    goto illegal_op;
                if (insn & (1 << 20)) {
                    /* rfe */
                    addr = load_reg(s, rn);
                    if ((insn & (1 << 24)) == 0)
                        tcg_gen_addi_i32(addr, addr, -8);
                    /* Load PC into tmp and CPSR into tmp2.  */
                    tmp = gen_ld32(addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp2 = gen_ld32(addr, 0);
                    if (insn & (1 << 21)) {
                        /* Base writeback.  */
                        if (insn & (1 << 24)) {
                            tcg_gen_addi_i32(addr, addr, 4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, -4);
                        }
                        store_reg(s, rn, addr);
                    } else {
                        dead_tmp(addr);
                    }
                    gen_rfe(s, tmp, tmp2);
                } else {
                    /* srs */
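                    /* Store r14 and the status register to the stack of
                       mode op (insn[4:0]), via the banked r13 when that is
                       not the current mode.  */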
                    op = (insn & 0x1f);
                    if (op == (env->uncached_cpsr & CPSR_M)) {
                        addr = load_reg(s, 13);
                    } else {
                        addr = new_tmp();
                        tmp = tcg_const_i32(op);
                        gen_helper_get_r13_banked(addr, cpu_env, tmp);
                        tcg_temp_free_i32(tmp);
                    }
                    if ((insn & (1 << 24)) == 0) {
                        tcg_gen_addi_i32(addr, addr, -8);
                    }
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = new_tmp();
                    gen_helper_cpsr_read(tmp);
                    gen_st32(tmp, addr, 0);
                    if (insn & (1 << 21)) {
                        if ((insn & (1 << 24)) == 0) {
                            tcg_gen_addi_i32(addr, addr, -4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, 4);
                        }
                        if (op == (env->uncached_cpsr & CPSR_M)) {
                            store_reg(s, 13, addr);
                        } else {
                            tmp = tcg_const_i32(op);
                            gen_helper_set_r13_banked(cpu_env, tmp, addr);
                            tcg_temp_free_i32(tmp);
                        }
                    } else {
                        dead_tmp(addr);
                    }
                }
            } else {
                int i;
                /* Load/store multiple.  */
                addr = load_reg(s, rn);
                offset = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        offset += 4;
                }
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -offset);
                }

                for (i = 0; i < 16; i++) {
                    if ((insn & (1 << i)) == 0)
                        continue;
                    if (insn & (1 << 20)) {
                        /* Load.  */
                        tmp = gen_ld32(addr, IS_USER(s));
                        if (i == 15) {
                            gen_bx(s, tmp);
                        } else {
                            store_reg(s, i, tmp);
                        }
                    } else {
                        /* Store.  */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    tcg_gen_addi_i32(addr, addr, 4);
                }
                if (insn & (1 << 21)) {
                    /* Base register writeback.  */
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, -offset);
                    }
                    /* Fault if writeback register is in register list.  */
                    if (insn & (1 << rn))
                        goto illegal_op;
                    store_reg(s, rn, addr);
                } else {
                    dead_tmp(addr);
                }
            }
        }
        break;
    case 5:

        op = (insn >> 21) & 0xf;
        if (op == 6) {
            /* Halfword pack.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
            if (insn & (1 << 5)) {
                /* pkhtb */
                if (shift == 0)
                    shift = 31;
                tcg_gen_sari_i32(tmp2, tmp2, shift);
                tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                tcg_gen_ext16u_i32(tmp2, tmp2);
            } else {
                /* pkhbt */
                if (shift)
                    tcg_gen_shli_i32(tmp2, tmp2, shift);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
            }
            tcg_gen_or_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* Data processing register constant shift.  */
            if (rn == 15) {
                tmp = new_tmp();
                tcg_gen_movi_i32(tmp, 0);
            } else {
                tmp = load_reg(s, rn);
            }
            tmp2 = load_reg(s, rm);

            shiftop = (insn >> 4) & 3;
            shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
            conds = (insn & (1 << 20)) != 0;
            logic_cc = (conds && thumb2_logic_op(op));
            gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
                goto illegal_op;
            dead_tmp(tmp2);
            if (rd != 15) {
                store_reg(s, rd, tmp);
            } else {
                dead_tmp(tmp);
            }
        }
        break;
    case 13: /* Misc data processing.  */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
            goto illegal_op;
        switch (op) {
        case 0: /* Register controlled shift.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((insn & 0x70) != 0)
                goto illegal_op;
            op = (insn >> 21) & 3;
            logic_cc = (insn & (1 << 20)) != 0;
            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
            if (logic_cc)
                gen_logic_CC(tmp);
            store_reg_bx(env, s, rd, tmp);
            break;
        case 1: /* Sign/zero extend.  */
            tmp = load_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient.  */
            if (shift != 0)
                tcg_gen_rotri_i32(tmp, tmp, shift * 8);
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: gen_sxth(tmp);   break;
            case 1: gen_uxth(tmp);   break;
            case 2: gen_sxtb16(tmp); break;
            case 3: gen_uxtb16(tmp); break;
            case 4: gen_sxtb(tmp);   break;
            case 5: gen_uxtb(tmp);   break;
            default: goto illegal_op;
            }
            if (rn != 15) {
                tmp2 = load_reg(s, rn);
                if ((op >> 1) == 1) {
                    gen_add16(tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    dead_tmp(tmp2);
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 2: /* SIMD add/subtract.  */
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
                goto illegal_op;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
            dead_tmp(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 3: /* Other data processing.  */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
            if (op < 4) {
                /* Saturating add/subtract.  */
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                if (op & 1)
                    gen_helper_double_saturate(tmp, tmp);
                if (op & 2)
                    gen_helper_sub_saturate(tmp, tmp2, tmp);
                else
                    gen_helper_add_saturate(tmp, tmp, tmp2);
                dead_tmp(tmp2);
            } else {
                tmp = load_reg(s, rn);
                switch (op) {
                case 0x0a: /* rbit */
                    gen_helper_rbit(tmp, tmp);
                    break;
                case 0x08: /* rev */
                    tcg_gen_bswap32_i32(tmp, tmp);
                    break;
                case 0x09: /* rev16 */
                    gen_rev16(tmp);
                    break;
                case 0x0b: /* revsh */
                    gen_revsh(tmp);
                    break;
                case 0x10: /* sel */
                    tmp2 = load_reg(s, rm);
                    tmp3 = new_tmp();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    dead_tmp(tmp3);
                    dead_tmp(tmp2);
                    break;
                case 0x18: /* clz */
                    gen_helper_clz(tmp, tmp);
                    break;
                default:
                    goto illegal_op;
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
            op = (insn >> 4) & 0xf;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                dead_tmp(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    if (op)
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                    else
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    dead_tmp(tmp2);
                }
                break;
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                dead_tmp(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    dead_tmp(tmp2);
                }
                break;
            case 2: /* Dual multiply add.  */
            case 4: /* Dual multiply subtract.  */
                if (op)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                /* This addition cannot overflow.  */
                if (insn & (1 << 22)) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                }
                dead_tmp(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    dead_tmp(tmp2);
                }
                break;
            case 3: /* 32 * 16 -> 32msb */
                if (op)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = new_tmp();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    dead_tmp(tmp2);
                }
                break;
            case 5: case 6: /* 32 * 32 -> 32msb */
                gen_imull(tmp, tmp2);
                if (insn & (1 << 5)) {
                    gen_roundqd(tmp, tmp2);
                    dead_tmp(tmp2);
                } else {
                    dead_tmp(tmp);
                    tmp = tmp2;
                }
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    if (insn & (1 << 21)) {
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    } else {
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                    }
                    dead_tmp(tmp2);
                }
                break;
            case 7: /* Unsigned sum of absolute differences.  */
                gen_helper_usad8(tmp, tmp, tmp2);
                dead_tmp(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    dead_tmp(tmp2);
                }
                break;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: case 7: /* 64-bit multiply, Divide.  */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
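            /* op[3:0] = insn[7:4] (op2), op[6:4] = insn[22:20] (op1).  */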
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                /* sdiv, udiv */
                if (!arm_feature(env, ARM_FEATURE_DIV))
                    goto illegal_op;
                if (op & 0x20)
                    gen_helper_udiv(tmp, tmp, tmp2);
                else
                    gen_helper_sdiv(tmp, tmp, tmp2);
                dead_tmp(tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long.  */
                if (op & 1)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (op & 0x10) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                }
                dead_tmp(tmp2);
                /* BUGFIX */
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                dead_tmp(tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            } else {
                if (op & 0x20) {
                    /* Unsigned 64-bit multiply  */
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                } else {
                    if (op & 8) {
                        /* smlalxy */
                        gen_mulxy(tmp, tmp2, op & 2, op & 1);
                        dead_tmp(tmp2);
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        dead_tmp(tmp);
                    } else {
                        /* Signed 64-bit multiply  */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    }
                }
                if (op & 4) {
                    /* umaal */
                    gen_addq_lo(s, tmp64, rs);
                    gen_addq_lo(s, tmp64, rd);
                } else if (op & 0x40) {
                    /* 64-bit accumulate.  */
                    gen_addq(s, tmp64, rs, rd);
                }
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            }
            break;
        }
        break;
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn (env, s, insn))
                goto illegal_op;
        }
        break;
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[31:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;

                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                }

                offset += s->pc;
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    /* blx */
                    offset &= ~(uint32_t)2;
                    gen_bx_im(s, offset);
                }
            } else if (((insn >> 23) & 7) == 7) {
                /* Misc control */
                if (insn & (1 << 13))
                    goto illegal_op;

                if (insn & (1 << 26)) {
                    /* Secure monitor call (v6Z) */
                    goto illegal_op; /* not implemented.  */
                } else {
                    op = (insn >> 20) & 7;
                    switch (op) {
                    case 0: /* msr cpsr.  */
                        if (IS_M(env)) {
                            tmp = load_reg(s, rn);
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_msr(cpu_env, addr, tmp);
                            tcg_temp_free_i32(addr);
                            dead_tmp(tmp);
                            gen_lookup_tb(s);
                            break;
                        }
                        /* fall through */
                    case 1: /* msr spsr.  */
                        if (IS_M(env))
                            goto illegal_op;
                        tmp = load_reg(s, rn);
                        if (gen_set_psr(s,
                              msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
                              op == 1, tmp))
                            goto illegal_op;
                        break;
                    case 2: /* cps, nop-hint.  */
                        if (((insn >> 8) & 7) == 0) {
                            gen_nop_hint(s, insn & 0xff);
                        }
                        /* Implemented as NOP in user mode.  */
                        if (IS_USER(s))
                            break;
                        offset = 0;
                        imm = 0;
                        if (insn & (1 << 10)) {
                            if (insn & (1 << 7))
                                offset |= CPSR_A;
                            if (insn & (1 << 6))
                                offset |= CPSR_I;
                            if (insn & (1 << 5))
                                offset |= CPSR_F;
                            if (insn & (1 << 9))
                                imm = CPSR_A | CPSR_I | CPSR_F;
                        }
                        if (insn & (1 << 8)) {
                            offset |= 0x1f;
                            imm |= (insn & 0x1f);
                        }
                        if (offset) {
                            gen_set_psr_im(s, offset, 0, imm);
                        }
                        break;
                    case 3: /* Special control operations.  */
                        ARCH(7);
                        op = (insn >> 4) & 0xf;
                        switch (op) {
                        case 2: /* clrex */
                            gen_clrex(s);
                            break;
                        case 4: /* dsb */
                        case 5: /* dmb */
                        case 6: /* isb */
                            /* These execute as NOPs.  */
                            break;
                        default:
                            goto illegal_op;
                        }
                        break;
                    case 4: /* bxj */
                        /* Trivial implementation equivalent to bx.  */
                        tmp = load_reg(s, rn);
                        gen_bx(s, tmp);
                        break;
                    case 5: /* Exception return.  */
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        if (rn != 14 || rd != 15) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rn);
                        tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                        gen_exception_return(s, tmp);
                        break;
                    case 6: /* mrs cpsr.  */
                        tmp = new_tmp();
                        if (IS_M(env)) {
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_mrs(tmp, cpu_env, addr);
                            tcg_temp_free_i32(addr);
                        } else {
                            gen_helper_cpsr_read(tmp);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 7: /* mrs spsr.  */
                        /* Not accessible in user mode.  */
                        if (IS_USER(s) || IS_M(env))
                            goto illegal_op;
                        tmp = load_cpu_field(spsr);
                        store_reg(s, rd, tmp);
                        break;
                    }
                }
            } else {
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                gen_test_cc(op ^ 1, s->condlabel);
                s->condjmp = 1;

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;

                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            }
        } else {
            /* Data processing immediate.  */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                        goto illegal_op;
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
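                    /* op: 2 = sbfx, 3 = bfi/bfc, 6 = ubfx, 7 = undefined;
                       other values saturate (bit 2 selects unsigned, an odd
                       op with zero shift selects the 16-bit form).  */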
                    imm = insn & 0x1f;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    if (rn == 15) {
                        tmp = new_tmp();
                        tcg_gen_movi_i32(tmp, 0);
                    } else {
                        tmp = load_reg(s, rn);
                    }
                    switch (op) {
                    case 2: /* Signed bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_sbfx(tmp, shift, imm);
                        break;
                    case 6: /* Unsigned bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_ubfx(tmp, shift, (1u << imm) - 1);
                        break;
                    case 3: /* Bitfield insert/clear.  */
                        if (imm < shift)
                            goto illegal_op;
                        imm = imm + 1 - shift;
                        if (imm != 32) {
                            tmp2 = load_reg(s, rd);
                            gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
                            dead_tmp(tmp2);
                        }
                        break;
                    case 7:
                        goto illegal_op;
                    default: /* Saturate.  */
                        if (shift) {
                            if (op & 1)
                                tcg_gen_sari_i32(tmp, tmp, shift);
                            else
                                tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        tmp2 = tcg_const_i32(imm);
                        if (op & 4) {
                            /* Unsigned.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_usat16(tmp, tmp, tmp2);
                            else
                                gen_helper_usat(tmp, tmp, tmp2);
                        } else {
                            /* Signed.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_ssat16(tmp, tmp, tmp2);
                            else
                                gen_helper_ssat(tmp, tmp, tmp2);
                        }
                        tcg_temp_free_i32(tmp2);
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            /* movt */
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                        } else {
                            /* movw */
                            tmp = new_tmp();
                            tcg_gen_movi_i32(tmp, imm);
                        }
                    } else {
                        /* Add/sub 12-bit immediate.  */
                        if (rn == 15) {
                            offset = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                                offset -= imm;
                            else
                                offset += imm;
                            tmp = new_tmp();
                            tcg_gen_movi_i32(tmp, offset);
                        } else {
                            tmp = load_reg(s, rn);
                            if (insn & (1 << 23))
                                tcg_gen_subi_i32(tmp, tmp, imm);
                            else
                                tcg_gen_addi_i32(tmp, tmp, imm);
                        }
                    }
                    store_reg(s, rd, tmp);
                }
            } else {
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
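                /* i:imm3:imm8 constant: modes 0-3 replicate the byte across
                   words/halfwords; otherwise (0x80 | imm8[6:0]) is rotated
                   right by the 5-bit count i:imm3:imm8[7].  */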
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm |= imm << 16;
                    imm <<= 8;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
                tmp2 = new_tmp();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15) {
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, 0);
                } else {
                    tmp = load_reg(s, rn);
                }
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                    goto illegal_op;
                dead_tmp(tmp2);
                rd = (insn >> 8) & 0xf;
                if (rd != 15) {
                    store_reg(s, rd, tmp);
                } else {
                    dead_tmp(tmp);
                }
            }
        }
        break;
    case 12: /* Load/store single data item.  */
        {
        int postinc = 0;
        int writeback = 0;
        int user;
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            break;
        }
        user = IS_USER(s);
        if (rn == 15) {
            addr = new_tmp();
            /* PC relative.  */
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
            else
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
        } else {
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                imm = insn & 0xfff;
                tcg_gen_addi_i32(addr, addr, imm);
            } else {
                op = (insn >> 8) & 7;
                imm = insn & 0xff;
                switch (op) {
                case 0: case 8: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                    if (shift > 3)
                        goto illegal_op;
                    tmp = load_reg(s, rm);
                    if (shift)
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    dead_tmp(tmp);
                    break;
                case 4: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                    break;
                case 6: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    user = 1;
                    break;
                case 1: /* Post-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 3: /* Post-increment.  */
                    postinc = 1;
                    writeback = 1;
                    break;
                case 5: /* Pre-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 7: /* Pre-increment.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    writeback = 1;
                    break;
                default:
                    goto illegal_op;
                }
            }
        }
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
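        /* op bits [1:0] encode the size (0 = byte, 1 = halfword, 2 = word);
           bit [2] requests sign extension on loads.  */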
        if (insn & (1 << 20)) {
            /* Load.  */
            if (rs == 15 && op != 2) {
                if (op & 2)
                    goto illegal_op;
                /* Memory hint.  Implemented as NOP.  */
            } else {
                switch (op) {
                case 0: tmp = gen_ld8u(addr, user); break;
                case 4: tmp = gen_ld8s(addr, user); break;
                case 1: tmp = gen_ld16u(addr, user); break;
                case 5: tmp = gen_ld16s(addr, user); break;
                case 2: tmp = gen_ld32(addr, user); break;
                default: goto illegal_op;
                }
                if (rs == 15) {
                    gen_bx(s, tmp);
                } else {
                    store_reg(s, rs, tmp);
                }
            }
        } else {
            /* Store.  */
            if (rs == 15)
                goto illegal_op;
            tmp = load_reg(s, rs);
            switch (op) {
            case 0: gen_st8(tmp, addr, user); break;
            case 1: gen_st16(tmp, addr, user); break;
            case 2: gen_st32(tmp, addr, user); break;
            default: goto illegal_op;
            }
        }
        if (postinc)
            tcg_gen_addi_i32(addr, addr, imm);
        if (writeback) {
            store_reg(s, rn, addr);
        } else {
            dead_tmp(addr);
        }
        }
        break;
    default:
        goto illegal_op;
    }
    return 0;
illegal_op:
    return 1;
}

static void disas_thumb_insn(CPUState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    insn = lduw_code(s->pc);
    s->pc += 2;

    switch (insn >> 12) {
    case 0: case 1:

        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = new_tmp();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, tmp, tmp2);
            }
            dead_tmp(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = new_tmp();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp */
                gen_helper_sub_cc(tmp, tmp, tmp2);
                dead_tmp(tmp);
                dead_tmp(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_helper_add_cc(tmp, tmp, tmp2);
                dead_tmp(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_helper_sub_cc(tmp, tmp, tmp2);
                dead_tmp(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = new_tmp();
            tcg_gen_movi_i32(addr, val);
            tmp = gen_ld32(addr, IS_USER(s));
            dead_tmp(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                dead_tmp(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_helper_sub_cc(tmp, tmp, tmp2);
                dead_tmp(tmp2);
                dead_tmp(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
                break;
            case 3: /* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = new_tmp();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                gen_bx(s, tmp);
                break;
            }
            break;
        }

        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

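        /* A nonzero val means the result is computed into tmp2 and belongs
           in rm; rd = 16 (set by tst/cmp/cmn below) marks ops with no
           destination register.  */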
        if (op == 9) { /* neg */
8544
            tmp = new_tmp();
8545
            tcg_gen_movi_i32(tmp, 0);
8546
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
8547
            tmp = load_reg(s, rd);
8548
        } else {
8549
            TCGV_UNUSED(tmp);
8550
        }
8551

    
8552
        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_helper_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_helper_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_helper_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask)
                gen_adc(tmp, tmp2);
            else
                gen_helper_adc_cc(tmp, tmp, tmp2);
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask)
                gen_sub_carry(tmp, tmp, tmp2);
            else
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_helper_sub_cc(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_helper_sub_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_helper_add_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    dead_tmp(tmp);
            } else {
                store_reg(s, rd, tmp);
                dead_tmp(tmp2);
            }
        } else {
            dead_tmp(tmp);
            dead_tmp(tmp2);
        }
        break;

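    /* Cases 5-9 are the remaining Thumb load/store forms: register offset,
       then word/byte/halfword immediate offset (the immediate is scaled by
       the access size), then SP-relative with an imm8*4 offset. */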
    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        dead_tmp(tmp);

        if (op < 3) /* store */
            tmp = load_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        case 4: /* ldr */
            tmp = gen_ld32(addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            tmp = gen_ld8u(addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
        dead_tmp(addr);
        break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

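    /* Case 10 is ADD rd, pc/sp, #imm8*4: bit 11 selects SP; for PC the
       architectural value (insn address + 4) is used with bit 1 forced
       to zero, giving ADR semantics. */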
    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;

        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900)
                gen_bx(s, tmp);
            break;

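        /* CBZ/CBNZ (v6T2): the branch offset is i:imm5:'0', taken relative
           to the architectural PC (this insn's address + 4; s->pc already
           points past the insn, hence s->pc + 2).  Bit 11 selects CBNZ. */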
        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            dead_tmp(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

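        /* IT (If-Then): the base condition goes into condexec_cond and the
           then/else mask (including its trailing '1' marker) into
           condexec_mask; the translation loop shifts the mask up by one
           for each insn in the block until it runs out. */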
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;

        case 0xe: /* bkpt */
            gen_set_condexec(s);
            gen_set_pc_im(s->pc - 2);
            gen_exception(EXCP_BKPT);
            s->is_jmp = DISAS_JUMP;
            break;

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;

        case 6: /* cps */
            ARCH(6);
            if (IS_USER(s))
                break;
            if (IS_M(env)) {
                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                /* PRIMASK */
                if (insn & 1) {
                    addr = tcg_const_i32(16);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                /* FAULTMASK */
                if (insn & 2) {
                    addr = tcg_const_i32(17);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                tcg_temp_free_i32(tmp);
                gen_lookup_tb(s);
            } else {
                if (insn & (1 << 4))
                    shift = CPSR_A | CPSR_I | CPSR_F;
                else
                    shift = 0;
                gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, i, tmp);
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        /* Base register writeback.  */
        if ((insn & (1 << rn)) == 0) {
            store_reg(s, rn, addr);
        } else {
            dead_tmp(addr);
        }
        break;

    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_condexec(s);
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

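    /* Case 14 with bit 11 clear is the unconditional branch (sign-extended
       imm11).  Otherwise, and for all of case 15, the halfword belongs to
       a 32-bit Thumb-2 encoding (including BL/BLX) and is handed off to
       disas_thumb2_insn. */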
    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 4);
    gen_exception(EXCP_UDEF);
    s->is_jmp = DISAS_JUMP;
    return;
illegal_op:
undef:
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 2);
    gen_exception(EXCP_UDEF);
    s->is_jmp = DISAS_JUMP;
}

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = env->thumb;
    dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
    dc->condexec_cond = env->condexec_bits >> 4;
#if !defined(CONFIG_USER_ONLY)
    if (IS_M(env)) {
        dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
    } else {
        dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
    }
#endif
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (env->condexec_bits)
      {
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_set_condexec(dc);
                    gen_set_pc_im(dc->pc);
                    gen_exception(EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

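        /* Translate one insn.  For Thumb, the IT state is advanced
           afterwards: the next mask bit is shifted into the LSB of the
           condition and the 5-bit mask moves up by one; once the mask
           reaches zero the IT block has finished. */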
        if (env->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}

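/* Thin public wrappers: search_pc selects whether the gen_opc_* arrays
   are filled in so a host PC can later be mapped back to a guest PC. */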
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

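/* Indexed by the low four bits of the PSR mode field; encodings that do
   not correspond to a valid ARM mode print as "???".  */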
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

#if 0
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}

void gen_pc_load(CPUState *env, TranslationBlock *tb,
                unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
}