Statistics
| Branch: | Revision:

root / target-arm / translate.c @ 3670669c

History | View | Annotate | Download (265.1 kB)

1
/*
2
 *  ARM translation
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *  Copyright (c) 2005-2007 CodeSourcery
6
 *  Copyright (c) 2007 OpenedHand, Ltd.
7
 *
8
 * This library is free software; you can redistribute it and/or
9
 * modify it under the terms of the GNU Lesser General Public
10
 * License as published by the Free Software Foundation; either
11
 * version 2 of the License, or (at your option) any later version.
12
 *
13
 * This library is distributed in the hope that it will be useful,
14
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16
 * Lesser General Public License for more details.
17
 *
18
 * You should have received a copy of the GNU Lesser General Public
19
 * License along with this library; if not, write to the Free Software
20
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21
 */
22
#include <stdarg.h>
23
#include <stdlib.h>
24
#include <stdio.h>
25
#include <string.h>
26
#include <inttypes.h>
27

    
28
#include "cpu.h"
29
#include "exec-all.h"
30
#include "disas.h"
31
#include "tcg-op.h"
32

    
33
#define GEN_HELPER 1
34
#include "helpers.h"
35

    
36
#define ENABLE_ARCH_5J    0
37
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
38
#define ENABLE_ARCH_6K   arm_feature(env, ARM_FEATURE_V6K)
39
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
40
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)
41

    
42
#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
43

    
44
/* internal defines */
45
/* Per-translation-block disassembly state.  */
typedef struct DisasContext {
    target_ulong pc;    /* address of the next instruction to translate */
    int is_jmp;         /* DISAS_* code describing how translation ends */
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;          /* nonzero when decoding Thumb instructions */
    int is_mem;         /* nonzero if this TB contains a memory access */
#if !defined(CONFIG_USER_ONLY)
    int user;           /* nonzero when translating user-mode code */
#endif
} DisasContext;
63

    
64
#if defined(CONFIG_USER_ONLY)
65
#define IS_USER(s) 1
66
#else
67
#define IS_USER(s) (s->user)
68
#endif
69

    
70
/* These instructions trap after executing, so defer them until after the
71
   conditional executions state has been updated.  */
72
#define DISAS_WFI 4
73
#define DISAS_SWI 5
74

    
75
/* XXX: move that elsewhere */
76
extern FILE *logfile;
77
extern int loglevel;
78

    
79
static TCGv cpu_env;
80
/* FIXME:  These should be removed.  */
81
static TCGv cpu_T[3];
82

    
83
/* initialize TCG globals.  */
84
void arm_translate_init(void)
85
{
86
    cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
87

    
88
    cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
89
    cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
90
    cpu_T[2] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG3, "T2");
91
}
92

    
93
/* The code generator doesn't like lots of temporaries, so maintain our own
94
   cache for reuse within a function.  */
95
#define MAX_TEMPS 8
96
static int num_temps;
97
static TCGv temps[MAX_TEMPS];
98

    
99
/* Allocate a temporary variable.  */
100
static TCGv new_tmp(void)
101
{
102
    TCGv tmp;
103
    if (num_temps == MAX_TEMPS)
104
        abort();
105

    
106
    if (GET_TCGV(temps[num_temps]))
107
      return temps[num_temps++];
108

    
109
    tmp = tcg_temp_new(TCG_TYPE_I32);
110
    temps[num_temps++] = tmp;
111
    return tmp;
112
}
113

    
114
/* Release a temporary variable.  */
115
/* Release a temporary variable obtained from new_tmp().
   The cache is kept dense: if tmp is not the most recently allocated
   temp, it is shuffled past the live entries so the freed slot sits
   just beyond num_temps for reuse.  */
static void dead_tmp(TCGv tmp)
{
    int i;
    num_temps--;
    i = num_temps;
    /* Fast path: tmp is the most recently allocated temp.  */
    if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
        return;

    /* Shuffle this temp to the last slot.  */
    while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
        i--;
    while (i < num_temps) {
        temps[i] = temps[i + 1];
        i++;
    }
    temps[i] = tmp;
}
132

    
133
/* Set a variable to the value of a CPU register.  */
134
/* Set var to the value of CPU register reg.  Reads of r15 return the
   architectural PC: the address of the next instruction (+2 in Thumb
   mode, +4 in ARM mode, since s->pc was already advanced).  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg != 15) {
        tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
        return;
    }
    /* PC reads see the following instruction's address.  */
    tcg_gen_movi_i32(var, (uint32_t)((long)s->pc + (s->thumb ? 2 : 4)));
}
148

    
149
/* Create a new temporary and set it to the value of a CPU register.  */
150
/* Return a fresh temporary holding the value of CPU register reg.
   The caller owns the temp and must release it with dead_tmp().  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv val = new_tmp();
    load_reg_var(s, val, reg);
    return val;
}
156

    
157
/* Set a CPU register.  The source must be a temporary and will be
158
   marked as dead.  */
159
/* Write var to CPU register reg and mark var dead.  Writes to r15
   force the address even (bit 0 cleared) and end the TB with
   DISAS_JUMP.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        /* The PC is always even; interworking goes through gen_bx.  */
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    dead_tmp(var);
}
168

    
169

    
170
/* Basic operations.  */
171
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
172
#define gen_op_movl_T0_T2() tcg_gen_mov_i32(cpu_T[0], cpu_T[2])
173
#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
174
#define gen_op_movl_T1_T2() tcg_gen_mov_i32(cpu_T[1], cpu_T[2])
175
#define gen_op_movl_T2_T0() tcg_gen_mov_i32(cpu_T[2], cpu_T[0])
176
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
177
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
178
#define gen_op_movl_T2_im(im) tcg_gen_movi_i32(cpu_T[2], im)
179

    
180
#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
181
#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
182
#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
183
#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
184

    
185
#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
186
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
187
#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
188
#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
189
#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
190
#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
191
#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
192

    
193
#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)
194
#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
195
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
196
#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)
197
#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)
198

    
199
/* Value extensions.  */
200
#define gen_uxtb(var) tcg_gen_andi_i32(var, var, 0xff)
201
#define gen_uxth(var) tcg_gen_andi_i32(var, var, 0xffff)
202
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
203
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
204

    
205
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
206
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
207
#define gen_op_rev_T0() tcg_gen_bswap_i32(cpu_T[0], cpu_T[0])
208

    
209
#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
210

    
211
#define gen_op_addl_T0_T1_setq() \
212
    gen_helper_add_setq(cpu_T[0], cpu_T[0], cpu_T[1])
213
#define gen_op_addl_T0_T1_saturate() \
214
    gen_helper_add_saturate(cpu_T[0], cpu_T[0], cpu_T[1])
215
#define gen_op_subl_T0_T1_saturate() \
216
    gen_helper_sub_saturate(cpu_T[0], cpu_T[0], cpu_T[1])
217
#define gen_op_addl_T0_T1_usaturate() \
218
    gen_helper_add_usaturate(cpu_T[0], cpu_T[0], cpu_T[1])
219
#define gen_op_subl_T0_T1_usaturate() \
220
    gen_helper_sub_usaturate(cpu_T[0], cpu_T[0], cpu_T[1])
221

    
222
/* Copy the most significant bit of T0 to all bits of T1.  */
223
#define gen_op_signbit_T1_T0() tcg_gen_sari_i32(cpu_T[1], cpu_T[0], 31)
224

    
225
/* Dual signed 16x16->32 multiply:
     a = sext16(low half of a)  * sext16(low half of b)
     b = (a >> 16) * (b >> 16)   (high halves, sign-extended by sari)
   Both inputs are clobbered.  */
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    /* Bug fix: the operands are 16-bit halfwords, so the low halves
       must be sign-extended with ext16s, not ext8s (which matched
       only 8 bits and corrupted the low product).  */
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    /* Arithmetic shift extracts the sign-extended high halves.  */
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}
240

    
241
/* Byteswap each halfword.  */
242
/* Byteswap each halfword of var in place:
   bytes [3 2 1 0] -> [2 3 0 1].  */
static void gen_rev16(TCGv var)
{
    TCGv odd = new_tmp();
    /* Move the odd bytes down...  */
    tcg_gen_shri_i32(odd, var, 8);
    tcg_gen_andi_i32(odd, odd, 0x00ff00ff);
    /* ...and the even bytes up, then merge.  */
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, odd);
    dead_tmp(odd);
}
252

    
253
/* Byteswap low halfword and sign extend.  */
254
/* Byteswap the low halfword of var and sign-extend the result to
   32 bits (REVSH semantics).  */
static void gen_revsh(TCGv var)
{
    TCGv lowbyte = new_tmp();
    /* Former byte 1 becomes the new low byte.  */
    tcg_gen_shri_i32(lowbyte, var, 8);
    tcg_gen_andi_i32(lowbyte, lowbyte, 0x00ff);
    /* Former byte 0 becomes the new (sign-carrying) high byte.  */
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, lowbyte);
    dead_tmp(lowbyte);
}
264

    
265
/* Unsigned bitfield extract.  */
266
/* Unsigned bitfield extract: var = (var >> shift) & mask.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift != 0)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
272

    
273
/* Signed bitfield extract.  */
274
/* Signed bitfield extract: shift right by shift, then sign-extend the
   low width bits of the result.  When shift + width == 32 the
   arithmetic shift already produces a sign-extended value and no
   further work is needed.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    if (shift != 0)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        /* Sign-extend via (x ^ s) - s, where s is the field's sign
           bit.  */
        uint32_t signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
287

    
288
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
289
/* Bitfield insert: dest = (base & ~mask) | ((val << shift) & mask).
   Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    /* Clear the destination field first, then position the new bits.  */
    tcg_gen_andi_i32(base, base, ~mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_or_i32(dest, base, val);
}
296

    
297
/* T0 = T1 + (T0 >> 31): add the rounding bit (sign of T0) to T1.  */
static void gen_op_roundqd_T0_T1(void)
{
    tcg_gen_shri_i32(cpu_T[0], cpu_T[0], 31);
    tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1]);
}
302

    
303
/* FIXME: Most targets have native widening multiplication.
304
   It would be good to use that instead of a full wide multiply.  */
305
/* Unsigned 32x32->64 multiply.  */
306
static void gen_op_mull_T0_T1(void)
307
{
308
    TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
309
    TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
310

    
311
    tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
312
    tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
313
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
314
    tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
315
    tcg_gen_shri_i64(tmp1, tmp1, 32);
316
    tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
317
}
318

    
319
/* Signed 32x32->64 multiply.  */
320
static void gen_op_imull_T0_T1(void)
321
{
322
    TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
323
    TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
324

    
325
    tcg_gen_ext_i32_i64(tmp1, cpu_T[0]);
326
    tcg_gen_ext_i32_i64(tmp2, cpu_T[1]);
327
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
328
    tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
329
    tcg_gen_shri_i64(tmp1, tmp1, 32);
330
    tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
331
}
332

    
333
/* Swap low and high halfwords.  */
334
/* Swap the low and high halfwords of var in place.  */
static void gen_swap_half(TCGv var)
{
    TCGv high = new_tmp();
    tcg_gen_shri_i32(high, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, high);
    dead_tmp(high);
}
342

    
343
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
344
    tmp = (t0 ^ t1) & 0x8000;
345
    t0 &= ~0x8000;
346
    t1 &= ~0x8000;
347
    t0 = (t0 + t1) ^ tmp;
348
 */
349

    
350
/* Dual 16-bit add without carry between halfwords.  The result is
   placed in t0 and t1 is marked dead.  Equivalent scalar code:
     tmp = (t0 ^ t1) & 0x8000;
     t0 &= ~0x8000;  t1 &= ~0x8000;
     t0 = (t0 + t1) ^ tmp;
   Masking out bit 15 of both inputs prevents a carry from the low
   halfword into the high one; the xor restores its correct value.  */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv carry_fix = new_tmp();
    tcg_gen_xor_i32(carry_fix, t0, t1);
    tcg_gen_andi_i32(carry_fix, carry_fix, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, carry_fix);
    dead_tmp(carry_fix);
    dead_tmp(t1);
}
362

    
363
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
364

    
365
/* Set CF to the top bit of var.  */
366
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    /* Bug fix: store the extracted bit (tmp), not the unshifted
       source word (the original passed var, leaving CF with the full
       value instead of bit 31).  */
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
373

    
374
/* Set N and Z flags from var.  */
375
/* Set the N and Z flags from var by storing it into the combined
   NZF field; N and Z are derived lazily from that value.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NZF));
}
379

    
380
/* T0 += T1 + CF.  */
381
static void gen_adc_T0_T1(void)
382
{
383
    TCGv tmp = new_tmp();
384
    gen_op_addl_T0_T1();
385
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUState, CF));
386
    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
387
    dead_tmp(tmp);
388
}
389

    
390
/* dest = T0 - T1 + CF - 1.  */
391
/* dest = t0 - t1 + CF - 1 (subtract with borrow).  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv cf = new_tmp();
    tcg_gen_ld_i32(cf, cpu_env, offsetof(CPUState, CF));
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cf);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(cf);
}
400

    
401
#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
402
#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
403

    
404
/* FIXME:  Implement this natively.  */
405
/* t0 = ~t1, implemented as xor with all-ones.
   FIXME:  Implement this natively.  */
static inline void tcg_gen_not_i32(TCGv t0, TCGv t1)
{
    tcg_gen_xori_i32(t0, t1, ~0);
}
409

    
410
/* T0 &= ~T1.  Clobbers T1.  */
411
/* FIXME: Implement bic natively.  */
412
/* T0 &= ~T1 (bit clear).  Clobbers T1, which is inverted in place.
   FIXME: Implement bic natively.  */
static inline void gen_op_bicl_T0_T1(void)
{
    gen_op_notl_T1();
    gen_op_andl_T0_T1();
}
417

    
418
/* FIXME:  Implement this natively.  */
419
/* t0 = t1 rotated right by i (0 <= i < 32).  Clobbers t1 when i is
   nonzero; i == 0 is a no-op (also avoids the undefined 32-bit
   shift).  FIXME:  Implement this natively.  */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
    TCGv low;

    if (i == 0)
        return;

    low = new_tmp();
    tcg_gen_shri_i32(low, t1, i);
    tcg_gen_shli_i32(t1, t1, 32 - i);
    tcg_gen_or_i32(t0, t1, low);
    dead_tmp(low);
}
432

    
433
/* Set CF to bit (shift) of var — the bit shifted out by the
   enclosing immediate shift.  For shift == 31 the logical shift
   already leaves a single bit, so no mask is needed.  */
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        /* Bug fix: a stray ';' after this condition made the test a
           no-op, so the mask ran unconditionally.  */
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
446

    
447
/* Shift by immediate.  Includes special handling for shift == 0.  */
448
/* Shift var by an immediate, with ARM's special shift == 0 encodings
   (LSR/ASR #32 and RRX).  If flags is set, also compute the shifter
   carry-out into CF.  Cleanups: the stray `break` glued to the rori
   call in case 3 is moved to the end of the case, a terminating
   break is added, and the spurious ';' after the function body is
   removed.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            /* LSR #0 encodes LSR #32: result is 0, CF = bit 31.  */
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32; /* ASR #0 encodes ASR #32 */
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31; /* sari by 31 replicates the sign bit */
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rori_i32(var, var, shift);
        } else {
            /* RRX: rotate right by one through the carry flag.  */
            TCGv tmp = new_tmp();
            tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUState, CF));
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
        break;
    }
}
497

    
498
/* Generator table for the ARM parallel add/subtract instructions,
   indexed by the two opcode fields of the instruction.  NULL and the
   empty rows mark undefined encodings.  */
#define PAS_OP(pfx) {  \
    gen_op_ ## pfx ## add16_T0_T1, \
    gen_op_ ## pfx ## addsubx_T0_T1, \
    gen_op_ ## pfx ## subaddx_T0_T1, \
    gen_op_ ## pfx ## sub16_T0_T1, \
    gen_op_ ## pfx ## add8_T0_T1, \
    NULL, \
    NULL, \
    gen_op_ ## pfx ## sub8_T0_T1 }

static GenOpFunc *gen_arm_parallel_addsub[8][8] = {
    {},
    PAS_OP(s),
    PAS_OP(q),
    PAS_OP(sh),
    {},
    PAS_OP(u),
    PAS_OP(uq),
    PAS_OP(uh),
};
#undef PAS_OP
519

    
520
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
521
/* Generator table for the Thumb-2 parallel add/subtract encodings;
   note the op2 ordering differs from the ARM table above.  */
#define PAS_OP(pfx) {  \
    gen_op_ ## pfx ## add8_T0_T1, \
    gen_op_ ## pfx ## add16_T0_T1, \
    gen_op_ ## pfx ## addsubx_T0_T1, \
    NULL, \
    gen_op_ ## pfx ## sub8_T0_T1, \
    gen_op_ ## pfx ## sub16_T0_T1, \
    gen_op_ ## pfx ## subaddx_T0_T1, \
    NULL }

static GenOpFunc *gen_thumb2_parallel_addsub[8][8] = {
    PAS_OP(s),
    PAS_OP(q),
    PAS_OP(sh),
    {},
    PAS_OP(u),
    PAS_OP(uq),
    PAS_OP(uh),
    {}
};
#undef PAS_OP
542

    
543
/* Condition-test generators, indexed by the instruction's 4-bit
   condition field.  AL (0xe) and NV (0xf) are never tested, hence
   only 14 entries.  */
static GenOpFunc1 *gen_test_cc[14] = {
    gen_op_test_eq,   /* EQ */
    gen_op_test_ne,   /* NE */
    gen_op_test_cs,   /* CS */
    gen_op_test_cc,   /* CC */
    gen_op_test_mi,   /* MI */
    gen_op_test_pl,   /* PL */
    gen_op_test_vs,   /* VS */
    gen_op_test_vc,   /* VC */
    gen_op_test_hi,   /* HI */
    gen_op_test_ls,   /* LS */
    gen_op_test_ge,   /* GE */
    gen_op_test_lt,   /* LT */
    gen_op_test_gt,   /* GT */
    gen_op_test_le,   /* LE */
};
559

    
560
/* Per-opcode flag: nonzero for data-processing opcodes whose S-bit
   form sets NZ from the logical result (vs. arithmetic flag
   computation), indexed by the 4-bit opcode field.  */
const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
578

    
579
/* Variable shift generators (value in T1, amount in T0), indexed by
   shift type: LSL, LSR, ASR, ROR.  */
static GenOpFunc *gen_shift_T1_T0[4] = {
    gen_op_shll_T1_T0,
    gen_op_shrl_T1_T0,
    gen_op_sarl_T1_T0,
    gen_op_rorl_T1_T0,
};

/* As above, but these variants also compute the shifter carry-out.  */
static GenOpFunc *gen_shift_T1_T0_cc[4] = {
    gen_op_shll_T1_T0_cc,
    gen_op_shrl_T1_T0_cc,
    gen_op_sarl_T1_T0_cc,
    gen_op_rorl_T1_T0_cc,
};
592

    
593
/* Set PC and thumb state from T0.  Clobbers T0.  */
594
/* Set PC and Thumb state from T0 (BX semantics).  Clobbers T0 and
   ends the TB.  */
static inline void gen_bx(DisasContext *s)
{
    TCGv thumb_bit;

    s->is_jmp = DISAS_UPDATE;
    /* Bit 0 of the branch target selects the instruction set.  */
    thumb_bit = new_tmp();
    tcg_gen_andi_i32(thumb_bit, cpu_T[0], 1);
    tcg_gen_st_i32(thumb_bit, cpu_env, offsetof(CPUState, thumb));
    dead_tmp(thumb_bit);
    /* The PC itself is always even.  */
    tcg_gen_andi_i32(cpu_T[0], cpu_T[0], ~1);
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, regs[15]));
}
606

    
607
/* Emit a load/store op.  Softmmu builds select the user or kernel
   variant from the translation context and record that the TB
   accesses memory; user-only builds always use the raw access op.  */
#if defined(CONFIG_USER_ONLY)
#define gen_ldst(name, s) gen_op_##name##_raw()
#else
#define gen_ldst(name, s) do { \
    s->is_mem = 1; \
    if (IS_USER(s)) \
        gen_op_##name##_user(); \
    else \
        gen_op_##name##_kernel(); \
    } while (0)
#endif
618

    
619
/* Load CPU register reg into T0.  */
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[0], reg);
}

/* Load CPU register reg into T1.  */
static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[1], reg);
}

/* Load CPU register reg into T2.  */
static inline void gen_movl_T2_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[2], reg);
}

/* Store T0 to the PC (r15) without otherwise ending the TB.  */
static inline void gen_set_pc_T0(void)
{
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, regs[15]));
}
638

    
639
/* Store scratch register Tt to CPU register reg.  Writes to r15
   clear bit 0 and end the TB with DISAS_JUMP.  */
static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
    TCGv tmp;

    if (reg != 15) {
        tcg_gen_st_i32(cpu_T[t], cpu_env, offsetof(CPUState, regs[reg]));
        return;
    }
    /* PC writes go through a masked temporary.  */
    tmp = new_tmp();
    tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
    dead_tmp(tmp);
    s->is_jmp = DISAS_JUMP;
}
654

    
655
/* Store T0 to CPU register reg.  */
static inline void gen_movl_reg_T0(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 0);
}

/* Store T1 to CPU register reg.  */
static inline void gen_movl_reg_T1(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 1);
}
664

    
665
/* Force a TB lookup after an instruction that changes the CPU state.  */
666
/* Force a TB lookup after an instruction that changes the CPU state:
   write the next PC back and end the TB with DISAS_UPDATE.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    gen_op_movl_T0_im(s->pc);
    gen_movl_reg_T0(s, 15);
    s->is_jmp = DISAS_UPDATE;
}
672

    
673
/* Apply the addressing-mode offset of a word/byte load/store insn to
   the address in T1.  Bit 25 selects register vs. immediate offset;
   bit 23 selects add vs. subtract.  */
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
{
    if (!(insn & (1 << 25))) {
        /* 12-bit immediate offset.  */
        int val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            gen_op_addl_T1_im(val);
    } else {
        /* Shifted register offset.  */
        int rm = insn & 0xf;
        int shift = (insn >> 7) & 0x1f;
        int shiftop = (insn >> 5) & 3;
        TCGv offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (insn & (1 << 23))
            tcg_gen_add_i32(cpu_T[1], cpu_T[1], offset);
        else
            tcg_gen_sub_i32(cpu_T[1], cpu_T[1], offset);
        dead_tmp(offset);
    }
}
699

    
700
/* Apply the addressing-mode offset of a halfword/doubleword
   load/store insn (plus extra) to the address in T1.  Bit 22 selects
   immediate vs. register offset; bit 23 selects add vs. subtract.  */
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra)
{
    if (insn & (1 << 22)) {
        /* Immediate offset, split across bits [3:0] and [11:8].  */
        int val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            gen_op_addl_T1_im(val);
    } else {
        /* Register offset.  */
        TCGv offset;
        if (extra)
            gen_op_addl_T1_im(extra);
        offset = load_reg(s, insn & 0xf);
        if (insn & (1 << 23))
            tcg_gen_add_i32(cpu_T[1], cpu_T[1], offset);
        else
            tcg_gen_sub_i32(cpu_T[1], cpu_T[1], offset);
        dead_tmp(offset);
    }
}
727

    
728
/* Emit a VFP op, selecting the double- (dp != 0) or single-precision
   variant.  */
#define VFP_OP(name)                      \
static inline void gen_vfp_##name(int dp) \
{                                         \
    if (dp)                               \
        gen_op_vfp_##name##d();           \
    else                                  \
        gen_op_vfp_##name##s();           \
}

/* As VFP_OP, for ops taking one integer argument (the fixed-point
   conversion ops below).  */
#define VFP_OP1(name)                               \
static inline void gen_vfp_##name(int dp, int arg)  \
{                                                   \
    if (dp)                                         \
        gen_op_vfp_##name##d(arg);                  \
    else                                            \
        gen_op_vfp_##name##s(arg);                  \
}

VFP_OP(add)
VFP_OP(sub)
VFP_OP(mul)
VFP_OP(div)
VFP_OP(neg)
VFP_OP(abs)
VFP_OP(sqrt)
VFP_OP(cmp)
VFP_OP(cmpe)
VFP_OP(F1_ld0)
VFP_OP(uito)
VFP_OP(sito)
VFP_OP(toui)
VFP_OP(touiz)
VFP_OP(tosi)
VFP_OP(tosiz)
VFP_OP1(tosh)
VFP_OP1(tosl)
VFP_OP1(touh)
VFP_OP1(toul)
VFP_OP1(shto)
VFP_OP1(slto)
VFP_OP1(uhto)
VFP_OP1(ulto)

#undef VFP_OP
772

    
773
/* Load the immediate VFP constant encoding val (dp selects the
   double/single-precision variant).  */
static inline void gen_vfp_fconst(int dp, uint32_t val)
{
    if (dp)
        gen_op_vfp_fconstd(val);
    else
        gen_op_vfp_fconsts(val);
}
780

    
781
/* Emit a VFP load from the address in T1 (dp selects double/single
   precision).  */
static inline void gen_vfp_ld(DisasContext *s, int dp)
{
    if (dp)
        gen_ldst(vfp_ldd, s);
    else
        gen_ldst(vfp_lds, s);
}

/* Emit a VFP store to the address in T1 (dp selects double/single
   precision).  */
static inline void gen_vfp_st(DisasContext *s, int dp)
{
    if (dp)
        gen_ldst(vfp_std, s);
    else
        gen_ldst(vfp_sts, s);
}
796

    
797
static inline long
798
vfp_reg_offset (int dp, int reg)
799
{
800
    if (dp)
801
        return offsetof(CPUARMState, vfp.regs[reg]);
802
    else if (reg & 1) {
803
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
804
          + offsetof(CPU_DoubleU, l.upper);
805
    } else {
806
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
807
          + offsetof(CPU_DoubleU, l.lower);
808
    }
809
}
810

    
811
/* Return the offset of a 32-bit piece of a NEON register.
812
   zero is the least significant end of the register.  */
813
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
820

    
821
#define NEON_GET_REG(T, reg, n) gen_op_neon_getreg_##T(neon_reg_offset(reg, n))
822
#define NEON_SET_REG(T, reg, n) gen_op_neon_setreg_##T(neon_reg_offset(reg, n))
823

    
824
/* Load VFP register reg into the F0 working register.  */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
}

/* Load VFP register reg into the F1 working register.  */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
}

/* Store the F0 working register to VFP register reg.  */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
}
847

    
848
#define ARM_CP_RW_BIT        (1 << 20)
849

    
850
/* Compute the effective address of an iwMMXt load/store into T1,
   handling pre/post indexing and base-register writeback.  Returns
   nonzero for an invalid addressing mode.  */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
{
    int rd = (insn >> 16) & 0xf;
    uint32_t offset = (insn & 0xff) << ((insn >> 7) & 2);

    gen_movl_T1_reg(s, rd);

    if (insn & (1 << 24)) {
        /* Pre indexed: bit 23 selects add vs. subtract, bit 21
           requests writeback.  */
        if (insn & (1 << 23))
            gen_op_addl_T1_im(offset);
        else
            gen_op_addl_T1_im(-offset);
        if (insn & (1 << 21))
            gen_movl_reg_T1(s, rd);
    } else if (insn & (1 << 21)) {
        /* Post indexed: use the base, write back base +/- offset.  */
        if (insn & (1 << 23))
            gen_op_movl_T0_im(offset);
        else
            gen_op_movl_T0_im(-offset);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
    } else if (!(insn & (1 << 23))) {
        /* Unindexed with the subtract bit is not a valid mode.  */
        return 1;
    }
    return 0;
}
880

    
881
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
882
{
883
    int rd = (insn >> 0) & 0xf;
884

    
885
    if (insn & (1 << 8))
886
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
887
            return 1;
888
        else
889
            gen_op_iwmmxt_movl_T0_wCx(rd);
890
    else
891
        gen_op_iwmmxt_movl_T0_T1_wRn(rd);
892

    
893
    gen_op_movl_T1_im(mask);
894
    gen_op_andl_T0_T1();
895
    return 0;
896
}
897

    
898
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occured
899
   (ie. an undefined instruction).  */
900
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
901
{
902
    int rd, wrd;
903
    int rdhi, rdlo, rd0, rd1, i;
904

    
905
    if ((insn & 0x0e000e00) == 0x0c000000) {
906
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
907
            wrd = insn & 0xf;
908
            rdlo = (insn >> 12) & 0xf;
909
            rdhi = (insn >> 16) & 0xf;
910
            if (insn & ARM_CP_RW_BIT) {                        /* TMRRC */
911
                gen_op_iwmmxt_movl_T0_T1_wRn(wrd);
912
                gen_movl_reg_T0(s, rdlo);
913
                gen_movl_reg_T1(s, rdhi);
914
            } else {                                        /* TMCRR */
915
                gen_movl_T0_reg(s, rdlo);
916
                gen_movl_T1_reg(s, rdhi);
917
                gen_op_iwmmxt_movl_wRn_T0_T1(wrd);
918
                gen_op_iwmmxt_set_mup();
919
            }
920
            return 0;
921
        }
922

    
923
        wrd = (insn >> 12) & 0xf;
924
        if (gen_iwmmxt_address(s, insn))
925
            return 1;
926
        if (insn & ARM_CP_RW_BIT) {
927
            if ((insn >> 28) == 0xf) {                        /* WLDRW wCx */
928
                gen_ldst(ldl, s);
929
                gen_op_iwmmxt_movl_wCx_T0(wrd);
930
            } else {
931
                if (insn & (1 << 8))
932
                    if (insn & (1 << 22))                /* WLDRD */
933
                        gen_ldst(iwmmxt_ldq, s);
934
                    else                                /* WLDRW wRd */
935
                        gen_ldst(iwmmxt_ldl, s);
936
                else
937
                    if (insn & (1 << 22))                /* WLDRH */
938
                        gen_ldst(iwmmxt_ldw, s);
939
                    else                                /* WLDRB */
940
                        gen_ldst(iwmmxt_ldb, s);
941
                gen_op_iwmmxt_movq_wRn_M0(wrd);
942
            }
943
        } else {
944
            if ((insn >> 28) == 0xf) {                        /* WSTRW wCx */
945
                gen_op_iwmmxt_movl_T0_wCx(wrd);
946
                gen_ldst(stl, s);
947
            } else {
948
                gen_op_iwmmxt_movq_M0_wRn(wrd);
949
                if (insn & (1 << 8))
950
                    if (insn & (1 << 22))                /* WSTRD */
951
                        gen_ldst(iwmmxt_stq, s);
952
                    else                                /* WSTRW wRd */
953
                        gen_ldst(iwmmxt_stl, s);
954
                else
955
                    if (insn & (1 << 22))                /* WSTRH */
956
                        gen_ldst(iwmmxt_ldw, s);
957
                    else                                /* WSTRB */
958
                        gen_ldst(iwmmxt_stb, s);
959
            }
960
        }
961
        return 0;
962
    }
963

    
964
    if ((insn & 0x0f000000) != 0x0e000000)
965
        return 1;
966

    
967
    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
968
    case 0x000:                                                /* WOR */
969
        wrd = (insn >> 12) & 0xf;
970
        rd0 = (insn >> 0) & 0xf;
971
        rd1 = (insn >> 16) & 0xf;
972
        gen_op_iwmmxt_movq_M0_wRn(rd0);
973
        gen_op_iwmmxt_orq_M0_wRn(rd1);
974
        gen_op_iwmmxt_setpsr_nz();
975
        gen_op_iwmmxt_movq_wRn_M0(wrd);
976
        gen_op_iwmmxt_set_mup();
977
        gen_op_iwmmxt_set_cup();
978
        break;
979
    case 0x011:                                                /* TMCR */
980
        if (insn & 0xf)
981
            return 1;
982
        rd = (insn >> 12) & 0xf;
983
        wrd = (insn >> 16) & 0xf;
984
        switch (wrd) {
985
        case ARM_IWMMXT_wCID:
986
        case ARM_IWMMXT_wCASF:
987
            break;
988
        case ARM_IWMMXT_wCon:
989
            gen_op_iwmmxt_set_cup();
990
            /* Fall through.  */
991
        case ARM_IWMMXT_wCSSF:
992
            gen_op_iwmmxt_movl_T0_wCx(wrd);
993
            gen_movl_T1_reg(s, rd);
994
            gen_op_bicl_T0_T1();
995
            gen_op_iwmmxt_movl_wCx_T0(wrd);
996
            break;
997
        case ARM_IWMMXT_wCGR0:
998
        case ARM_IWMMXT_wCGR1:
999
        case ARM_IWMMXT_wCGR2:
1000
        case ARM_IWMMXT_wCGR3:
1001
            gen_op_iwmmxt_set_cup();
1002
            gen_movl_reg_T0(s, rd);
1003
            gen_op_iwmmxt_movl_wCx_T0(wrd);
1004
            break;
1005
        default:
1006
            return 1;
1007
        }
1008
        break;
1009
    case 0x100:                                                /* WXOR */
1010
        wrd = (insn >> 12) & 0xf;
1011
        rd0 = (insn >> 0) & 0xf;
1012
        rd1 = (insn >> 16) & 0xf;
1013
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1014
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
1015
        gen_op_iwmmxt_setpsr_nz();
1016
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1017
        gen_op_iwmmxt_set_mup();
1018
        gen_op_iwmmxt_set_cup();
1019
        break;
1020
    case 0x111:                                                /* TMRC */
1021
        if (insn & 0xf)
1022
            return 1;
1023
        rd = (insn >> 12) & 0xf;
1024
        wrd = (insn >> 16) & 0xf;
1025
        gen_op_iwmmxt_movl_T0_wCx(wrd);
1026
        gen_movl_reg_T0(s, rd);
1027
        break;
1028
    case 0x300:                                                /* WANDN */
1029
        wrd = (insn >> 12) & 0xf;
1030
        rd0 = (insn >> 0) & 0xf;
1031
        rd1 = (insn >> 16) & 0xf;
1032
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1033
        gen_op_iwmmxt_negq_M0();
1034
        gen_op_iwmmxt_andq_M0_wRn(rd1);
1035
        gen_op_iwmmxt_setpsr_nz();
1036
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1037
        gen_op_iwmmxt_set_mup();
1038
        gen_op_iwmmxt_set_cup();
1039
        break;
1040
    case 0x200:                                                /* WAND */
1041
        wrd = (insn >> 12) & 0xf;
1042
        rd0 = (insn >> 0) & 0xf;
1043
        rd1 = (insn >> 16) & 0xf;
1044
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1045
        gen_op_iwmmxt_andq_M0_wRn(rd1);
1046
        gen_op_iwmmxt_setpsr_nz();
1047
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1048
        gen_op_iwmmxt_set_mup();
1049
        gen_op_iwmmxt_set_cup();
1050
        break;
1051
    case 0x810: case 0xa10:                                /* WMADD */
1052
        wrd = (insn >> 12) & 0xf;
1053
        rd0 = (insn >> 0) & 0xf;
1054
        rd1 = (insn >> 16) & 0xf;
1055
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1056
        if (insn & (1 << 21))
1057
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1058
        else
1059
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
1060
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1061
        gen_op_iwmmxt_set_mup();
1062
        break;
1063
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:        /* WUNPCKIL */
1064
        wrd = (insn >> 12) & 0xf;
1065
        rd0 = (insn >> 16) & 0xf;
1066
        rd1 = (insn >> 0) & 0xf;
1067
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1068
        switch ((insn >> 22) & 3) {
1069
        case 0:
1070
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1071
            break;
1072
        case 1:
1073
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1074
            break;
1075
        case 2:
1076
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1077
            break;
1078
        case 3:
1079
            return 1;
1080
        }
1081
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1082
        gen_op_iwmmxt_set_mup();
1083
        gen_op_iwmmxt_set_cup();
1084
        break;
1085
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:        /* WUNPCKIH */
1086
        wrd = (insn >> 12) & 0xf;
1087
        rd0 = (insn >> 16) & 0xf;
1088
        rd1 = (insn >> 0) & 0xf;
1089
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1090
        switch ((insn >> 22) & 3) {
1091
        case 0:
1092
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1093
            break;
1094
        case 1:
1095
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1096
            break;
1097
        case 2:
1098
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1099
            break;
1100
        case 3:
1101
            return 1;
1102
        }
1103
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1104
        gen_op_iwmmxt_set_mup();
1105
        gen_op_iwmmxt_set_cup();
1106
        break;
1107
    case 0x012: case 0x112: case 0x412: case 0x512:        /* WSAD */
1108
        wrd = (insn >> 12) & 0xf;
1109
        rd0 = (insn >> 16) & 0xf;
1110
        rd1 = (insn >> 0) & 0xf;
1111
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1112
        if (insn & (1 << 22))
1113
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
1114
        else
1115
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
1116
        if (!(insn & (1 << 20)))
1117
            gen_op_iwmmxt_addl_M0_wRn(wrd);
1118
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1119
        gen_op_iwmmxt_set_mup();
1120
        break;
1121
    case 0x010: case 0x110: case 0x210: case 0x310:        /* WMUL */
1122
        wrd = (insn >> 12) & 0xf;
1123
        rd0 = (insn >> 16) & 0xf;
1124
        rd1 = (insn >> 0) & 0xf;
1125
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1126
        if (insn & (1 << 21))
1127
            gen_op_iwmmxt_mulsw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
1128
        else
1129
            gen_op_iwmmxt_muluw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
1130
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1131
        gen_op_iwmmxt_set_mup();
1132
        break;
1133
    case 0x410: case 0x510: case 0x610: case 0x710:        /* WMAC */
1134
        wrd = (insn >> 12) & 0xf;
1135
        rd0 = (insn >> 16) & 0xf;
1136
        rd1 = (insn >> 0) & 0xf;
1137
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1138
        if (insn & (1 << 21))
1139
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
1140
        else
1141
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
1142
        if (!(insn & (1 << 20))) {
1143
            if (insn & (1 << 21))
1144
                gen_op_iwmmxt_addsq_M0_wRn(wrd);
1145
            else
1146
                gen_op_iwmmxt_adduq_M0_wRn(wrd);
1147
        }
1148
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1149
        gen_op_iwmmxt_set_mup();
1150
        break;
1151
    case 0x006: case 0x406: case 0x806: case 0xc06:        /* WCMPEQ */
1152
        wrd = (insn >> 12) & 0xf;
1153
        rd0 = (insn >> 16) & 0xf;
1154
        rd1 = (insn >> 0) & 0xf;
1155
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1156
        switch ((insn >> 22) & 3) {
1157
        case 0:
1158
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1159
            break;
1160
        case 1:
1161
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1162
            break;
1163
        case 2:
1164
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1165
            break;
1166
        case 3:
1167
            return 1;
1168
        }
1169
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1170
        gen_op_iwmmxt_set_mup();
1171
        gen_op_iwmmxt_set_cup();
1172
        break;
1173
    case 0x800: case 0x900: case 0xc00: case 0xd00:        /* WAVG2 */
1174
        wrd = (insn >> 12) & 0xf;
1175
        rd0 = (insn >> 16) & 0xf;
1176
        rd1 = (insn >> 0) & 0xf;
1177
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1178
        if (insn & (1 << 22))
1179
            gen_op_iwmmxt_avgw_M0_wRn(rd1, (insn >> 20) & 1);
1180
        else
1181
            gen_op_iwmmxt_avgb_M0_wRn(rd1, (insn >> 20) & 1);
1182
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1183
        gen_op_iwmmxt_set_mup();
1184
        gen_op_iwmmxt_set_cup();
1185
        break;
1186
    case 0x802: case 0x902: case 0xa02: case 0xb02:        /* WALIGNR */
1187
        wrd = (insn >> 12) & 0xf;
1188
        rd0 = (insn >> 16) & 0xf;
1189
        rd1 = (insn >> 0) & 0xf;
1190
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1191
        gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1192
        gen_op_movl_T1_im(7);
1193
        gen_op_andl_T0_T1();
1194
        gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1195
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1196
        gen_op_iwmmxt_set_mup();
1197
        break;
1198
    case 0x601: case 0x605: case 0x609: case 0x60d:        /* TINSR */
1199
        rd = (insn >> 12) & 0xf;
1200
        wrd = (insn >> 16) & 0xf;
1201
        gen_movl_T0_reg(s, rd);
1202
        gen_op_iwmmxt_movq_M0_wRn(wrd);
1203
        switch ((insn >> 6) & 3) {
1204
        case 0:
1205
            gen_op_movl_T1_im(0xff);
1206
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1207
            break;
1208
        case 1:
1209
            gen_op_movl_T1_im(0xffff);
1210
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1211
            break;
1212
        case 2:
1213
            gen_op_movl_T1_im(0xffffffff);
1214
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1215
            break;
1216
        case 3:
1217
            return 1;
1218
        }
1219
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1220
        gen_op_iwmmxt_set_mup();
1221
        break;
1222
    case 0x107: case 0x507: case 0x907: case 0xd07:        /* TEXTRM */
1223
        rd = (insn >> 12) & 0xf;
1224
        wrd = (insn >> 16) & 0xf;
1225
        if (rd == 15)
1226
            return 1;
1227
        gen_op_iwmmxt_movq_M0_wRn(wrd);
1228
        switch ((insn >> 22) & 3) {
1229
        case 0:
1230
            if (insn & 8)
1231
                gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1232
            else {
1233
                gen_op_movl_T1_im(0xff);
1234
                gen_op_iwmmxt_extru_T0_M0_T1((insn & 7) << 3);
1235
            }
1236
            break;
1237
        case 1:
1238
            if (insn & 8)
1239
                gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1240
            else {
1241
                gen_op_movl_T1_im(0xffff);
1242
                gen_op_iwmmxt_extru_T0_M0_T1((insn & 3) << 4);
1243
            }
1244
            break;
1245
        case 2:
1246
            gen_op_movl_T1_im(0xffffffff);
1247
            gen_op_iwmmxt_extru_T0_M0_T1((insn & 1) << 5);
1248
            break;
1249
        case 3:
1250
            return 1;
1251
        }
1252
        gen_movl_reg_T0(s, rd);
1253
        break;
1254
    case 0x117: case 0x517: case 0x917: case 0xd17:        /* TEXTRC */
1255
        if ((insn & 0x000ff008) != 0x0003f000)
1256
            return 1;
1257
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1258
        switch ((insn >> 22) & 3) {
1259
        case 0:
1260
            gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1261
            break;
1262
        case 1:
1263
            gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1264
            break;
1265
        case 2:
1266
            gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1267
            break;
1268
        case 3:
1269
            return 1;
1270
        }
1271
        gen_op_shll_T1_im(28);
1272
        gen_op_movl_T0_T1();
1273
        gen_op_movl_cpsr_T0(0xf0000000);
1274
        break;
1275
    case 0x401: case 0x405: case 0x409: case 0x40d:        /* TBCST */
1276
        rd = (insn >> 12) & 0xf;
1277
        wrd = (insn >> 16) & 0xf;
1278
        gen_movl_T0_reg(s, rd);
1279
        switch ((insn >> 6) & 3) {
1280
        case 0:
1281
            gen_op_iwmmxt_bcstb_M0_T0();
1282
            break;
1283
        case 1:
1284
            gen_op_iwmmxt_bcstw_M0_T0();
1285
            break;
1286
        case 2:
1287
            gen_op_iwmmxt_bcstl_M0_T0();
1288
            break;
1289
        case 3:
1290
            return 1;
1291
        }
1292
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1293
        gen_op_iwmmxt_set_mup();
1294
        break;
1295
    case 0x113: case 0x513: case 0x913: case 0xd13:        /* TANDC */
1296
        if ((insn & 0x000ff00f) != 0x0003f000)
1297
            return 1;
1298
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1299
        switch ((insn >> 22) & 3) {
1300
        case 0:
1301
            for (i = 0; i < 7; i ++) {
1302
                gen_op_shll_T1_im(4);
1303
                gen_op_andl_T0_T1();
1304
            }
1305
            break;
1306
        case 1:
1307
            for (i = 0; i < 3; i ++) {
1308
                gen_op_shll_T1_im(8);
1309
                gen_op_andl_T0_T1();
1310
            }
1311
            break;
1312
        case 2:
1313
            gen_op_shll_T1_im(16);
1314
            gen_op_andl_T0_T1();
1315
            break;
1316
        case 3:
1317
            return 1;
1318
        }
1319
        gen_op_movl_cpsr_T0(0xf0000000);
1320
        break;
1321
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:        /* WACC */
1322
        wrd = (insn >> 12) & 0xf;
1323
        rd0 = (insn >> 16) & 0xf;
1324
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1325
        switch ((insn >> 22) & 3) {
1326
        case 0:
1327
            gen_op_iwmmxt_addcb_M0();
1328
            break;
1329
        case 1:
1330
            gen_op_iwmmxt_addcw_M0();
1331
            break;
1332
        case 2:
1333
            gen_op_iwmmxt_addcl_M0();
1334
            break;
1335
        case 3:
1336
            return 1;
1337
        }
1338
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1339
        gen_op_iwmmxt_set_mup();
1340
        break;
1341
    case 0x115: case 0x515: case 0x915: case 0xd15:        /* TORC */
1342
        if ((insn & 0x000ff00f) != 0x0003f000)
1343
            return 1;
1344
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1345
        switch ((insn >> 22) & 3) {
1346
        case 0:
1347
            for (i = 0; i < 7; i ++) {
1348
                gen_op_shll_T1_im(4);
1349
                gen_op_orl_T0_T1();
1350
            }
1351
            break;
1352
        case 1:
1353
            for (i = 0; i < 3; i ++) {
1354
                gen_op_shll_T1_im(8);
1355
                gen_op_orl_T0_T1();
1356
            }
1357
            break;
1358
        case 2:
1359
            gen_op_shll_T1_im(16);
1360
            gen_op_orl_T0_T1();
1361
            break;
1362
        case 3:
1363
            return 1;
1364
        }
1365
        gen_op_movl_T1_im(0xf0000000);
1366
        gen_op_andl_T0_T1();
1367
        gen_op_movl_cpsr_T0(0xf0000000);
1368
        break;
1369
    case 0x103: case 0x503: case 0x903: case 0xd03:        /* TMOVMSK */
1370
        rd = (insn >> 12) & 0xf;
1371
        rd0 = (insn >> 16) & 0xf;
1372
        if ((insn & 0xf) != 0)
1373
            return 1;
1374
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1375
        switch ((insn >> 22) & 3) {
1376
        case 0:
1377
            gen_op_iwmmxt_msbb_T0_M0();
1378
            break;
1379
        case 1:
1380
            gen_op_iwmmxt_msbw_T0_M0();
1381
            break;
1382
        case 2:
1383
            gen_op_iwmmxt_msbl_T0_M0();
1384
            break;
1385
        case 3:
1386
            return 1;
1387
        }
1388
        gen_movl_reg_T0(s, rd);
1389
        break;
1390
    case 0x106: case 0x306: case 0x506: case 0x706:        /* WCMPGT */
1391
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
1392
        wrd = (insn >> 12) & 0xf;
1393
        rd0 = (insn >> 16) & 0xf;
1394
        rd1 = (insn >> 0) & 0xf;
1395
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1396
        switch ((insn >> 22) & 3) {
1397
        case 0:
1398
            if (insn & (1 << 21))
1399
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1400
            else
1401
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1402
            break;
1403
        case 1:
1404
            if (insn & (1 << 21))
1405
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1406
            else
1407
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1408
            break;
1409
        case 2:
1410
            if (insn & (1 << 21))
1411
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1412
            else
1413
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1414
            break;
1415
        case 3:
1416
            return 1;
1417
        }
1418
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1419
        gen_op_iwmmxt_set_mup();
1420
        gen_op_iwmmxt_set_cup();
1421
        break;
1422
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:        /* WUNPCKEL */
1423
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1424
        wrd = (insn >> 12) & 0xf;
1425
        rd0 = (insn >> 16) & 0xf;
1426
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1427
        switch ((insn >> 22) & 3) {
1428
        case 0:
1429
            if (insn & (1 << 21))
1430
                gen_op_iwmmxt_unpacklsb_M0();
1431
            else
1432
                gen_op_iwmmxt_unpacklub_M0();
1433
            break;
1434
        case 1:
1435
            if (insn & (1 << 21))
1436
                gen_op_iwmmxt_unpacklsw_M0();
1437
            else
1438
                gen_op_iwmmxt_unpackluw_M0();
1439
            break;
1440
        case 2:
1441
            if (insn & (1 << 21))
1442
                gen_op_iwmmxt_unpacklsl_M0();
1443
            else
1444
                gen_op_iwmmxt_unpacklul_M0();
1445
            break;
1446
        case 3:
1447
            return 1;
1448
        }
1449
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1450
        gen_op_iwmmxt_set_mup();
1451
        gen_op_iwmmxt_set_cup();
1452
        break;
1453
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:        /* WUNPCKEH */
1454
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1455
        wrd = (insn >> 12) & 0xf;
1456
        rd0 = (insn >> 16) & 0xf;
1457
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1458
        switch ((insn >> 22) & 3) {
1459
        case 0:
1460
            if (insn & (1 << 21))
1461
                gen_op_iwmmxt_unpackhsb_M0();
1462
            else
1463
                gen_op_iwmmxt_unpackhub_M0();
1464
            break;
1465
        case 1:
1466
            if (insn & (1 << 21))
1467
                gen_op_iwmmxt_unpackhsw_M0();
1468
            else
1469
                gen_op_iwmmxt_unpackhuw_M0();
1470
            break;
1471
        case 2:
1472
            if (insn & (1 << 21))
1473
                gen_op_iwmmxt_unpackhsl_M0();
1474
            else
1475
                gen_op_iwmmxt_unpackhul_M0();
1476
            break;
1477
        case 3:
1478
            return 1;
1479
        }
1480
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1481
        gen_op_iwmmxt_set_mup();
1482
        gen_op_iwmmxt_set_cup();
1483
        break;
1484
    case 0x204: case 0x604: case 0xa04: case 0xe04:        /* WSRL */
1485
    case 0x214: case 0x614: case 0xa14: case 0xe14:
1486
        wrd = (insn >> 12) & 0xf;
1487
        rd0 = (insn >> 16) & 0xf;
1488
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1489
        if (gen_iwmmxt_shift(insn, 0xff))
1490
            return 1;
1491
        switch ((insn >> 22) & 3) {
1492
        case 0:
1493
            return 1;
1494
        case 1:
1495
            gen_op_iwmmxt_srlw_M0_T0();
1496
            break;
1497
        case 2:
1498
            gen_op_iwmmxt_srll_M0_T0();
1499
            break;
1500
        case 3:
1501
            gen_op_iwmmxt_srlq_M0_T0();
1502
            break;
1503
        }
1504
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1505
        gen_op_iwmmxt_set_mup();
1506
        gen_op_iwmmxt_set_cup();
1507
        break;
1508
    case 0x004: case 0x404: case 0x804: case 0xc04:        /* WSRA */
1509
    case 0x014: case 0x414: case 0x814: case 0xc14:
1510
        wrd = (insn >> 12) & 0xf;
1511
        rd0 = (insn >> 16) & 0xf;
1512
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1513
        if (gen_iwmmxt_shift(insn, 0xff))
1514
            return 1;
1515
        switch ((insn >> 22) & 3) {
1516
        case 0:
1517
            return 1;
1518
        case 1:
1519
            gen_op_iwmmxt_sraw_M0_T0();
1520
            break;
1521
        case 2:
1522
            gen_op_iwmmxt_sral_M0_T0();
1523
            break;
1524
        case 3:
1525
            gen_op_iwmmxt_sraq_M0_T0();
1526
            break;
1527
        }
1528
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1529
        gen_op_iwmmxt_set_mup();
1530
        gen_op_iwmmxt_set_cup();
1531
        break;
1532
    case 0x104: case 0x504: case 0x904: case 0xd04:        /* WSLL */
1533
    case 0x114: case 0x514: case 0x914: case 0xd14:
1534
        wrd = (insn >> 12) & 0xf;
1535
        rd0 = (insn >> 16) & 0xf;
1536
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1537
        if (gen_iwmmxt_shift(insn, 0xff))
1538
            return 1;
1539
        switch ((insn >> 22) & 3) {
1540
        case 0:
1541
            return 1;
1542
        case 1:
1543
            gen_op_iwmmxt_sllw_M0_T0();
1544
            break;
1545
        case 2:
1546
            gen_op_iwmmxt_slll_M0_T0();
1547
            break;
1548
        case 3:
1549
            gen_op_iwmmxt_sllq_M0_T0();
1550
            break;
1551
        }
1552
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1553
        gen_op_iwmmxt_set_mup();
1554
        gen_op_iwmmxt_set_cup();
1555
        break;
1556
    case 0x304: case 0x704: case 0xb04: case 0xf04:        /* WROR */
1557
    case 0x314: case 0x714: case 0xb14: case 0xf14:
1558
        wrd = (insn >> 12) & 0xf;
1559
        rd0 = (insn >> 16) & 0xf;
1560
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1561
        switch ((insn >> 22) & 3) {
1562
        case 0:
1563
            return 1;
1564
        case 1:
1565
            if (gen_iwmmxt_shift(insn, 0xf))
1566
                return 1;
1567
            gen_op_iwmmxt_rorw_M0_T0();
1568
            break;
1569
        case 2:
1570
            if (gen_iwmmxt_shift(insn, 0x1f))
1571
                return 1;
1572
            gen_op_iwmmxt_rorl_M0_T0();
1573
            break;
1574
        case 3:
1575
            if (gen_iwmmxt_shift(insn, 0x3f))
1576
                return 1;
1577
            gen_op_iwmmxt_rorq_M0_T0();
1578
            break;
1579
        }
1580
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1581
        gen_op_iwmmxt_set_mup();
1582
        gen_op_iwmmxt_set_cup();
1583
        break;
1584
    case 0x116: case 0x316: case 0x516: case 0x716:        /* WMIN */
1585
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
1586
        wrd = (insn >> 12) & 0xf;
1587
        rd0 = (insn >> 16) & 0xf;
1588
        rd1 = (insn >> 0) & 0xf;
1589
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1590
        switch ((insn >> 22) & 3) {
1591
        case 0:
1592
            if (insn & (1 << 21))
1593
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
1594
            else
1595
                gen_op_iwmmxt_minub_M0_wRn(rd1);
1596
            break;
1597
        case 1:
1598
            if (insn & (1 << 21))
1599
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
1600
            else
1601
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
1602
            break;
1603
        case 2:
1604
            if (insn & (1 << 21))
1605
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
1606
            else
1607
                gen_op_iwmmxt_minul_M0_wRn(rd1);
1608
            break;
1609
        case 3:
1610
            return 1;
1611
        }
1612
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1613
        gen_op_iwmmxt_set_mup();
1614
        break;
1615
    case 0x016: case 0x216: case 0x416: case 0x616:        /* WMAX */
1616
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
1617
        wrd = (insn >> 12) & 0xf;
1618
        rd0 = (insn >> 16) & 0xf;
1619
        rd1 = (insn >> 0) & 0xf;
1620
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1621
        switch ((insn >> 22) & 3) {
1622
        case 0:
1623
            if (insn & (1 << 21))
1624
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
1625
            else
1626
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
1627
            break;
1628
        case 1:
1629
            if (insn & (1 << 21))
1630
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
1631
            else
1632
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
1633
            break;
1634
        case 2:
1635
            if (insn & (1 << 21))
1636
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
1637
            else
1638
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
1639
            break;
1640
        case 3:
1641
            return 1;
1642
        }
1643
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1644
        gen_op_iwmmxt_set_mup();
1645
        break;
1646
    case 0x002: case 0x102: case 0x202: case 0x302:        /* WALIGNI */
1647
    case 0x402: case 0x502: case 0x602: case 0x702:
1648
        wrd = (insn >> 12) & 0xf;
1649
        rd0 = (insn >> 16) & 0xf;
1650
        rd1 = (insn >> 0) & 0xf;
1651
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1652
        gen_op_movl_T0_im((insn >> 20) & 3);
1653
        gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1654
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1655
        gen_op_iwmmxt_set_mup();
1656
        break;
1657
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:        /* WSUB */
1658
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
1659
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
1660
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
1661
        wrd = (insn >> 12) & 0xf;
1662
        rd0 = (insn >> 16) & 0xf;
1663
        rd1 = (insn >> 0) & 0xf;
1664
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1665
        switch ((insn >> 20) & 0xf) {
1666
        case 0x0:
1667
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
1668
            break;
1669
        case 0x1:
1670
            gen_op_iwmmxt_subub_M0_wRn(rd1);
1671
            break;
1672
        case 0x3:
1673
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
1674
            break;
1675
        case 0x4:
1676
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
1677
            break;
1678
        case 0x5:
1679
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
1680
            break;
1681
        case 0x7:
1682
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
1683
            break;
1684
        case 0x8:
1685
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
1686
            break;
1687
        case 0x9:
1688
            gen_op_iwmmxt_subul_M0_wRn(rd1);
1689
            break;
1690
        case 0xb:
1691
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
1692
            break;
1693
        default:
1694
            return 1;
1695
        }
1696
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1697
        gen_op_iwmmxt_set_mup();
1698
        gen_op_iwmmxt_set_cup();
1699
        break;
1700
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:        /* WSHUFH */
1701
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
1702
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
1703
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
1704
        wrd = (insn >> 12) & 0xf;
1705
        rd0 = (insn >> 16) & 0xf;
1706
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1707
        gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
1708
        gen_op_iwmmxt_shufh_M0_T0();
1709
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1710
        gen_op_iwmmxt_set_mup();
1711
        gen_op_iwmmxt_set_cup();
1712
        break;
1713
    case 0x018: case 0x118: case 0x218: case 0x318:        /* WADD */
1714
    case 0x418: case 0x518: case 0x618: case 0x718:
1715
    case 0x818: case 0x918: case 0xa18: case 0xb18:
1716
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
1717
        wrd = (insn >> 12) & 0xf;
1718
        rd0 = (insn >> 16) & 0xf;
1719
        rd1 = (insn >> 0) & 0xf;
1720
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1721
        switch ((insn >> 20) & 0xf) {
1722
        case 0x0:
1723
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
1724
            break;
1725
        case 0x1:
1726
            gen_op_iwmmxt_addub_M0_wRn(rd1);
1727
            break;
1728
        case 0x3:
1729
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
1730
            break;
1731
        case 0x4:
1732
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
1733
            break;
1734
        case 0x5:
1735
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
1736
            break;
1737
        case 0x7:
1738
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
1739
            break;
1740
        case 0x8:
1741
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
1742
            break;
1743
        case 0x9:
1744
            gen_op_iwmmxt_addul_M0_wRn(rd1);
1745
            break;
1746
        case 0xb:
1747
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
1748
            break;
1749
        default:
1750
            return 1;
1751
        }
1752
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1753
        gen_op_iwmmxt_set_mup();
1754
        gen_op_iwmmxt_set_cup();
1755
        break;
1756
    case 0x008: case 0x108: case 0x208: case 0x308:        /* WPACK */
1757
    case 0x408: case 0x508: case 0x608: case 0x708:
1758
    case 0x808: case 0x908: case 0xa08: case 0xb08:
1759
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
1760
        wrd = (insn >> 12) & 0xf;
1761
        rd0 = (insn >> 16) & 0xf;
1762
        rd1 = (insn >> 0) & 0xf;
1763
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1764
        if (!(insn & (1 << 20)))
1765
            return 1;
1766
        switch ((insn >> 22) & 3) {
1767
        case 0:
1768
            return 1;
1769
        case 1:
1770
            if (insn & (1 << 21))
1771
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
1772
            else
1773
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
1774
            break;
1775
        case 2:
1776
            if (insn & (1 << 21))
1777
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
1778
            else
1779
                gen_op_iwmmxt_packul_M0_wRn(rd1);
1780
            break;
1781
        case 3:
1782
            if (insn & (1 << 21))
1783
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
1784
            else
1785
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
1786
            break;
1787
        }
1788
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1789
        gen_op_iwmmxt_set_mup();
1790
        gen_op_iwmmxt_set_cup();
1791
        break;
1792
    case 0x201: case 0x203: case 0x205: case 0x207:
1793
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
1794
    case 0x211: case 0x213: case 0x215: case 0x217:
1795
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
1796
        wrd = (insn >> 5) & 0xf;
1797
        rd0 = (insn >> 12) & 0xf;
1798
        rd1 = (insn >> 0) & 0xf;
1799
        if (rd0 == 0xf || rd1 == 0xf)
1800
            return 1;
1801
        gen_op_iwmmxt_movq_M0_wRn(wrd);
1802
        switch ((insn >> 16) & 0xf) {
1803
        case 0x0:                                        /* TMIA */
1804
            gen_movl_T0_reg(s, rd0);
1805
            gen_movl_T1_reg(s, rd1);
1806
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
1807
            break;
1808
        case 0x8:                                        /* TMIAPH */
1809
            gen_movl_T0_reg(s, rd0);
1810
            gen_movl_T1_reg(s, rd1);
1811
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
1812
            break;
1813
        case 0xc: case 0xd: case 0xe: case 0xf:                /* TMIAxy */
1814
            gen_movl_T1_reg(s, rd0);
1815
            if (insn & (1 << 16))
1816
                gen_op_shrl_T1_im(16);
1817
            gen_op_movl_T0_T1();
1818
            gen_movl_T1_reg(s, rd1);
1819
            if (insn & (1 << 17))
1820
                gen_op_shrl_T1_im(16);
1821
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
1822
            break;
1823
        default:
1824
            return 1;
1825
        }
1826
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1827
        gen_op_iwmmxt_set_mup();
1828
        break;
1829
    default:
1830
        return 1;
1831
    }
1832

    
1833
    return 0;
1834
}
1835

    
1836
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occured
   (ie. an undefined instruction).
   Handles the two XScale DSP (coprocessor 0) encodings: the MIA* multiply
   with internal accumulate family, and MAR/MRA accumulator access.  The
   40-bit accumulator acc0 is stored in iWMMXt register state, so the
   iWMMXt multiply/move helpers are reused here.  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        /* XScale only implements accumulator 0.  */
        if (acc != 0)
            return 1;

        switch ((insn >> 16) & 0xf) {
        case 0x0:                                        /* MIA */
            gen_movl_T0_reg(s, rd0);
            gen_movl_T1_reg(s, rd1);
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
            break;
        case 0x8:                                        /* MIAPH */
            gen_movl_T0_reg(s, rd0);
            gen_movl_T1_reg(s, rd1);
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
            break;
        case 0xc:                                        /* MIABB */
        case 0xd:                                        /* MIABT */
        case 0xe:                                        /* MIATB */
        case 0xf:                                        /* MIATT */
            /* Bits 16 and 17 select the top (1) or bottom (0) halfword
               of each operand; shift the chosen half down before the
               16x16 multiply-accumulate.  */
            gen_movl_T1_reg(s, rd0);
            if (insn & (1 << 16))
                gen_op_shrl_T1_im(16);
            gen_op_movl_T0_T1();
            gen_movl_T1_reg(s, rd1);
            if (insn & (1 << 17))
                gen_op_shrl_T1_im(16);
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
            break;
        default:
            return 1;
        }

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        /* XScale only implements accumulator 0.  */
        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                        /* MRA */
            gen_op_iwmmxt_movl_T0_T1_wRn(acc);
            gen_movl_reg_T0(s, rdlo);
            /* The accumulator is 40 bits wide; only bits [39:32] are
               valid in the high word, so mask the rest off.  */
            gen_op_movl_T0_im((1 << (40 - 32)) - 1);
            gen_op_andl_T0_T1();
            gen_movl_reg_T0(s, rdhi);
        } else {                                        /* MAR */
            gen_movl_T0_reg(s, rdlo);
            gen_movl_T1_reg(s, rdhi);
            gen_op_iwmmxt_movl_wRn_T0_T1(acc);
        }
        return 0;
    }

    return 1;
}
1908

    
1909
/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined.
   Dispatches MRC/MCR to the per-coprocessor cp_read/cp_write hooks
   registered in env->cp[].  Coprocessor access from user mode is
   rejected.  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;
    if (IS_USER(s)) {
        return 1;
    }

    if (insn & ARM_CP_RW_BIT) {
        /* MRC: coprocessor -> ARM register.  */
        if (!env->cp[cp].cp_read)
            return 1;
        /* Synchronize the PC before calling out, so the helper (and any
           exception it raises) sees the correct instruction address.  */
        gen_op_movl_T0_im((uint32_t) s->pc);
        gen_set_pc_T0();
        gen_op_movl_T0_cp(insn);
        gen_movl_reg_T0(s, rd);
    } else {
        /* MCR: ARM register -> coprocessor.  */
        if (!env->cp[cp].cp_write)
            return 1;
        gen_op_movl_T0_im((uint32_t) s->pc);
        gen_set_pc_T0();
        gen_movl_T0_reg(s, rd);
        gen_op_movl_cp_T0(insn);
    }
    return 0;
}
1936

    
1937
/* Return nonzero if the cp15 access encoded in 'insn' is permitted from
   user mode.  Only the TLS registers (c13, c0) and the ISB/DSB/DMB
   barrier operations in c7 are user-accessible.  */
static int cp15_user_ok(uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    /* Combine opc2 (bits 7:5) and opc1 (bits 23:21) into one value.  */
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    /* TLS register: reads and writes of the user RW register (op 2),
       reads only of the user RO register (op 3).  */
    if (crn == 13 && crm == 0
        && (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT))))
        return 1;

    /* ISB (c7, c5, op 4); DSB and DMB (c7, c10, ops 4 and 5).  */
    if (crn == 7
        && ((crm == 5 && op == 4)
            || (crm == 10 && (op == 4 || op == 5))))
        return 1;

    return 0;
}
1956

    
1957
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.
   Recognizes WFI encodings specially, treats MCRR as a cache-maintenance
   no-op, and forwards everything else to the cp15 read/write helpers.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt.  Sync the PC and end the TB so the main
           loop can halt the CPU.  */
        gen_op_movl_T0_im((long)s->pc);
        gen_set_pc_T0();
        s->is_jmp = DISAS_WFI;
        return 0;
    }
    rd = (insn >> 12) & 0xf;
    if (insn & ARM_CP_RW_BIT) {
        gen_op_movl_T0_cp15(insn);
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            gen_movl_reg_T0(s, rd);
    } else {
        gen_movl_T0_reg(s, rd);
        gen_op_movl_cp15_T0(insn);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    return 0;
}
2008

    
2009
/* Shift 'x' right by 'n' bits, treating a negative count as a left
   shift; lets VFP_SREG use one expression for either field position.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Decode a single precision register number: a 4-bit field at 'bigbit'
   supplies the high bits and a 1-bit field at 'smallbit' the low bit.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Decode a double precision register number into 'reg'.  VFP3 has 32
   D registers, with the extra high bit at 'smallbit'; on pre-VFP3 cores
   that bit must be zero, otherwise the instruction is undefined (hence
   the embedded 'return 1' — only usable inside the disas functions).  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Field positions for the D (destination), N and M (source) operands.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2028

    
2029
static inline int
2030
vfp_enabled(CPUState * env)
2031
{
2032
    return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2033
}
2034

    
2035
/* Disassemble a VFP instruction.  Returns nonzero if an error occured
   (ie. an undefined instruction).
   Covers single-register transfers (including the NEON element/VDUP
   forms), system register moves (FMXR/FMRX), data processing ops with
   short-vector iteration, two-register transfers and load/store
   (single and multiple).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!vfp_enabled(env)) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    /* cp11 encodings (0xb00) are double precision, cp10 single.  */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                /* Sub-word element transfers are NEON-only.  */
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                pass = (insn >> 21) & 1;
                /* Decode element size and bit offset within the 32-bit
                   pass: bit 22 selects byte, bit 5 halfword, else word.  */
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm: extract and sign/zero-extend (bit 23)
                       the selected element.  */
                    switch (size) {
                    case 0:
                        NEON_GET_REG(T1, rn, pass);
                        if (offset)
                            gen_op_shrl_T1_im(offset);
                        if (insn & (1 << 23))
                            gen_uxtb(cpu_T[1]);
                        else
                            gen_sxtb(cpu_T[1]);
                        break;
                    case 1:
                        NEON_GET_REG(T1, rn, pass);
                        if (insn & (1 << 23)) {
                            if (offset) {
                                gen_op_shrl_T1_im(16);
                            } else {
                                gen_uxth(cpu_T[1]);
                            }
                        } else {
                            if (offset) {
                                gen_op_sarl_T1_im(16);
                            } else {
                                gen_sxth(cpu_T[1]);
                            }
                        }
                        break;
                    case 2:
                        NEON_GET_REG(T1, rn, pass);
                        break;
                    }
                    gen_movl_reg_T1(s, rd);
                } else {
                    /* arm->vfp */
                    gen_movl_T0_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP: replicate the value across both passes.  */
                        if (size == 0) {
                            gen_op_neon_dup_u8(0);
                        } else if (size == 1) {
                            gen_op_neon_dup_low16();
                        }
                        NEON_SET_REG(T0, rn, 0);
                        NEON_SET_REG(T0, rn, 1);
                    } else {
                        /* VMOV: insert the element, preserving the rest
                           of the pass word.  */
                        switch (size) {
                        case 0:
                            NEON_GET_REG(T2, rn, pass);
                            gen_op_movl_T1_im(0xff);
                            gen_op_andl_T0_T1();
                            gen_op_neon_insert_elt(offset, ~(0xff << offset));
                            NEON_SET_REG(T2, rn, pass);
                            break;
                        case 1:
                            NEON_GET_REG(T2, rn, pass);
                            gen_op_movl_T1_im(0xffff);
                            gen_op_andl_T0_T1();
                            bank_mask = offset ? 0xffff : 0xffff0000;
                            gen_op_neon_insert_elt(offset, bank_mask);
                            NEON_SET_REG(T2, rn, pass);
                            break;
                        case 2:
                            NEON_SET_REG(T0, rn, pass);
                            break;
                        }
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        case ARM_VFP_FPSCR:
                            /* rd == 15 is FMSTAT: only the flag bits.  */
                            if (rd == 15)
                                gen_op_vfp_movl_T0_fpscr_flags();
                            else
                                gen_op_vfp_movl_T0_fpscr();
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        gen_op_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_op_movl_cpsr_T0(0xf0000000);
                    } else
                        gen_movl_reg_T0(s, rd);
                } else {
                    /* arm->vfp */
                    gen_movl_T0_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            gen_op_vfp_movl_fpscr_T0();
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            gen_op_vfp_movl_xreg_T0(rn);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            gen_op_vfp_movl_xreg_T0(rn);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_op_vfp_msr();
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || rn > 17)) {
                    /* Integer or single precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }

                if (op == 15 && (rn == 16 || rn == 17)) {
                    /* Integer source.  */
                    rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                rm = VFP_SREG_M(insn);
            }

            /* Extension-space ops other than cpy/abs/neg/sqrt are never
               vectorized.  */
            veclen = env->vfp.vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (env->vfp.vec_stride >> 1) + 1;
                    else
                        delta_d = env->vfp.vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            /* Loop over the elements of a short vector operation
               (a single iteration for scalar ops).  */
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* mac: fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* nmac: fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* msc: -fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 3: /* nmsc: -fd - (fn * fm)  */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    gen_vfp_neg(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 14: /* fconst */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                      return 1;

                    /* Expand the VFP3 8-bit immediate into the top bits
                       of a single or double precision value.  */
                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                    }
                    gen_vfp_fconst(dp, n);
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_op_vfp_fcvtsd();
                        else
                            gen_op_vfp_fcvtds();
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp);
                        break;
                    case 20: /* fshto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                          return 1;
                        gen_vfp_shto(dp, rm);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                          return 1;
                        gen_vfp_slto(dp, rm);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                          return 1;
                        gen_vfp_uhto(dp, rm);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                          return 1;
                        gen_vfp_ulto(dp, rm);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                          return 1;
                        gen_vfp_tosh(dp, rm);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                          return 1;
                        gen_vfp_tosl(dp, rm);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                          return 1;
                        gen_vfp_touh(dp, rm);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                          return 1;
                        gen_vfp_toul(dp, rm);
                        break;
                    default: /* undefined */
                        printf ("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    printf ("op:%d\n", op);
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && rn > 17)
                    /* Integer result.  */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  Register numbers wrap within
                   their bank, so advance modulo the bank size.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if (dp && (insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(1, rm);
                    gen_op_vfp_mrrd();
                    gen_movl_reg_T0(s, rd);
                    gen_movl_reg_T1(s, rn);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    gen_op_vfp_mrs();
                    gen_movl_reg_T0(s, rn);
                    gen_mov_F0_vreg(0, rm + 1);
                    gen_op_vfp_mrs();
                    gen_movl_reg_T0(s, rd);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    gen_movl_T0_reg(s, rd);
                    gen_movl_T1_reg(s, rn);
                    gen_op_vfp_mdrr();
                    gen_mov_vreg_F0(1, rm);
                } else {
                    gen_movl_T0_reg(s, rn);
                    gen_op_vfp_msr();
                    gen_mov_vreg_F0(0, rm);
                    gen_movl_T0_reg(s, rd);
                    gen_op_vfp_msr();
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if (s->thumb && rn == 15) {
                /* PC-relative in Thumb mode: the base is the word-aligned
                   PC value.  */
                gen_op_movl_T1_im(s->pc & ~2);
            } else {
                gen_movl_T1_reg(s, rn);
            }
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                gen_op_addl_T1_im(offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp);
                }
            } else {
                /* load/store multiple */
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (insn & (1 << 24)) /* pre-decrement */
                    gen_op_addl_T1_im(-((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp);
                    }
                    gen_op_addl_T1_im(offset);
                }
                if (insn & (1 << 21)) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        gen_op_addl_T1_im(offset);
                    gen_movl_reg_T1(s, rn);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
2682

    
2683
/* Emit code to transfer control to guest address DEST.  When the target
   lies on the same guest page as this TB we chain directly via goto_tb
   slot N; otherwise we exit to the main loop for a full TB lookup.  */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb = s->tb;
    int can_chain = (tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);

    if (can_chain)
        tcg_gen_goto_tb(n);
    /* Both paths load DEST into T0 and commit it to the PC.  */
    gen_op_movl_T0_im(dest);
    gen_set_pc_T0();
    tcg_gen_exit_tb(can_chain ? (long)tb + n : 0);
}
2699

    
2700
/* Emit an unconditional jump to DEST.  Uses a direct (chainable) jump
   unless single-stepping, where an indirect branch is needed so the
   debug exception still fires.  */
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (!__builtin_expect(s->singlestep_enabled, 0)) {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
        return;
    }
    /* An indirect jump so that we still trigger the debug exception.  */
    if (s->thumb)
        dest |= 1;
    gen_op_movl_T0_im(dest);
    gen_bx(s);
}
2713

    
2714
static inline void gen_mulxy(int x, int y)
2715
{
2716
    if (x)
2717
        tcg_gen_sari_i32(cpu_T[0], cpu_T[0], 16);
2718
    else
2719
        gen_sxth(cpu_T[0]);
2720
    if (y)
2721
        gen_op_sarl_T1_im(16);
2722
    else
2723
        gen_sxth(cpu_T[1]);
2724
    gen_op_mul_T0_T1();
2725
}
2726

    
2727
/* Return the mask of PSR bits set by a MSR instruction.  */
2728
/* Return the mask of PSR bits writable by an MSR instruction.  FLAGS is
   the instruction's 4-bit byte-select field; SPSR is nonzero when the
   target is the SPSR rather than the CPSR.  */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask = 0;
    int i;

    /* Each flag bit selects one byte of the PSR.  */
    for (i = 0; i < 4; i++) {
        if (flags & (1 << i))
            mask |= 0xffu << (i * 8);
    }
    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    /* E and GE bits only exist from ARMv6 on.  */
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    /* IT bits require Thumb-2.  */
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
2755

    
2756
/* Returns nonzero if access to the PSR is not permitted.  */
2757
/* Write T0 (under MASK) to the CPSR, or to the SPSR when SPSR is set.
   Returns nonzero if the access is not permitted.  */
static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
{
    if (!spsr) {
        gen_op_movl_cpsr_T0(mask);
    } else {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;
        gen_op_movl_spsr_T0(mask);
    }
    /* The PSR write may have changed the execution mode; re-resolve.  */
    gen_lookup_tb(s);
    return 0;
}
2770

    
2771
/* Generate an old-style exception return.  */
2772
/* Generate an old-style exception return.  On entry T0 holds the return
   address; it is committed to the PC first, then the CPSR is restored
   from the current mode's SPSR.  Order matters: the SPSR load reuses T0.  */
static void gen_exception_return(DisasContext *s)
{
    gen_set_pc_T0();
    gen_op_movl_T0_spsr();
    gen_op_movl_cpsr_T0(0xffffffff);
    /* CPSR changed behind the translator's back; force a state re-read.  */
    s->is_jmp = DISAS_UPDATE;
}
2779

    
2780
/* Generate a v6 exception return.  */
2781
/* Generate a v6 exception return (RFE).  On entry T0 holds the new CPSR
   value and T2 the return address.  The CPSR is written first (full
   mask), then the return address is moved into T0 and committed.  */
static void gen_rfe(DisasContext *s)
{
    gen_op_movl_cpsr_T0(0xffffffff);
    gen_op_movl_T0_T2();
    gen_set_pc_T0();
    /* Mode may have changed; exit the TB and re-evaluate state.  */
    s->is_jmp = DISAS_UPDATE;
}
2788

    
2789
static inline void
2790
gen_set_condexec (DisasContext *s)
2791
{
2792
    if (s->condexec_mask) {
2793
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
2794
        TCGv tmp = new_tmp();
2795
        tcg_gen_movi_i32(tmp, val);
2796
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, condexec_bits));
2797
        dead_tmp(tmp);
2798
    }
2799
}
2800

    
2801
/* Emit code for a NOP-compatible hint instruction.  Only WFI (val == 3)
   has an effect: it saves the PC and halts translation so the CPU can
   enter the wait-for-interrupt state.  */
static void gen_nop_hint(DisasContext *s, int val)
{
    if (val == 3) { /* wfi */
        gen_op_movl_T0_im((long)s->pc);
        gen_set_pc_T0();
        s->is_jmp = DISAS_WFI;
    }
    /* wfe (2) and sev (4) currently behave as nops.
       TODO: Implement SEV and WFE.  May help SMP performance.  */
}
2816

    
2817
/* Neon shift by constant.  The actual ops are the same as used for variable
   shifts.  [OP][U][SIZE]
   OP is the immediate-shift opcode (0..7, see per-row comments), the middle
   index is the instruction's U bit, and SIZE the element width
   (8/16/32/64).  NOTE(review): some rows list the unsigned helpers in the
   [0] slot and several rows duplicate entries (e.g. VSHR vs VSRA, which
   differ only in the accumulate step done by the caller) — verify the
   decoder's indexing against this layout before changing it.  */
static GenOpFunc *gen_neon_shift_im[8][2][4] = {
    { /* 0 */ /* VSHR */
      {
        gen_op_neon_shl_u8,
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
        gen_op_neon_shl_u64
      }, {
        gen_op_neon_shl_s8,
        gen_op_neon_shl_s16,
        gen_op_neon_shl_s32,
        gen_op_neon_shl_s64
      }
    }, { /* 1 */ /* VSRA */
      {
        gen_op_neon_shl_u8,
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
        gen_op_neon_shl_u64
      }, {
        gen_op_neon_shl_s8,
        gen_op_neon_shl_s16,
        gen_op_neon_shl_s32,
        gen_op_neon_shl_s64
      }
    }, { /* 2 */ /* VRSHR */
      {
        gen_op_neon_rshl_u8,
        gen_op_neon_rshl_u16,
        gen_op_neon_rshl_u32,
        gen_op_neon_rshl_u64
      }, {
        gen_op_neon_rshl_s8,
        gen_op_neon_rshl_s16,
        gen_op_neon_rshl_s32,
        gen_op_neon_rshl_s64
      }
    }, { /* 3 */ /* VRSRA */
      {
        gen_op_neon_rshl_u8,
        gen_op_neon_rshl_u16,
        gen_op_neon_rshl_u32,
        gen_op_neon_rshl_u64
      }, {
        gen_op_neon_rshl_s8,
        gen_op_neon_rshl_s16,
        gen_op_neon_rshl_s32,
        gen_op_neon_rshl_s64
      }
    }, { /* 4 */
      {
        /* op 4 with U clear has no valid encoding.  */
        NULL, NULL, NULL, NULL
      }, { /* VSRI */
        gen_op_neon_shl_u8,
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
        gen_op_neon_shl_u64,
      }
    }, { /* 5 */
      { /* VSHL */
        gen_op_neon_shl_u8,
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
        gen_op_neon_shl_u64,
      }, { /* VSLI */
        gen_op_neon_shl_u8,
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
        gen_op_neon_shl_u64,
      }
    }, { /* 6 */ /* VQSHL */
      {
        gen_op_neon_qshl_u8,
        gen_op_neon_qshl_u16,
        gen_op_neon_qshl_u32,
        gen_op_neon_qshl_u64
      }, {
        gen_op_neon_qshl_s8,
        gen_op_neon_qshl_s16,
        gen_op_neon_qshl_s32,
        gen_op_neon_qshl_s64
      }
    }, { /* 7 */ /* VQSHLU */
      {
        /* VQSHLU saturates to an unsigned result for both rows.  */
        gen_op_neon_qshl_u8,
        gen_op_neon_qshl_u16,
        gen_op_neon_qshl_u32,
        gen_op_neon_qshl_u64
      }, {
        gen_op_neon_qshl_u8,
        gen_op_neon_qshl_u16,
        gen_op_neon_qshl_u32,
        gen_op_neon_qshl_u64
      }
    }
};
2915

    
2916
/* Shift helpers used by the narrowing-shift instructions.
   [R][U][size - 1]: R selects the rounding variants (rshl) over the plain
   shifts, and size-1 indexes the source element width (16/32/64 bits).
   NOTE(review): the middle index lists the unsigned helpers in slot [0]
   and signed in slot [1] — confirm against the decoder's use of U.  */
static GenOpFunc *gen_neon_shift_im_narrow[2][2][3] = {
    {
      {
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
        gen_op_neon_shl_u64
      }, {
        gen_op_neon_shl_s16,
        gen_op_neon_shl_s32,
        gen_op_neon_shl_s64
      }
    }, {
      {
        gen_op_neon_rshl_u16,
        gen_op_neon_rshl_u32,
        gen_op_neon_rshl_u64
      }, {
        gen_op_neon_rshl_s16,
        gen_op_neon_rshl_s32,
        gen_op_neon_rshl_s64
      }
    }
};
2940

    
2941
/* Narrowing a 32-bit element to 32 bits is the identity; this stub exists
   so the gen_neon_narrow[] table has an entry for every size.
   Fix: declare with (void) — empty parens in C declare a function without
   a prototype, which defeats argument checking.  */
static inline void
gen_op_neon_narrow_u32 (void)
{
    /* No-op.  */
}
2946

    
2947
/* Plain (truncating) narrowing ops, indexed by destination element size
   (0 = 8, 1 = 16, 2 = 32 bits; the 32-bit entry is a no-op stub).  */
static GenOpFunc *gen_neon_narrow[3] = {
    gen_op_neon_narrow_u8,
    gen_op_neon_narrow_u16,
    gen_op_neon_narrow_u32
};
2952

    
2953
/* Unsigned saturating narrowing ops, indexed by destination element size
   (0 = 8, 1 = 16, 2 = 32 bits).  */
static GenOpFunc *gen_neon_narrow_satu[3] = {
    gen_op_neon_narrow_sat_u8,
    gen_op_neon_narrow_sat_u16,
    gen_op_neon_narrow_sat_u32
};
2958

    
2959
/* Signed saturating narrowing ops, indexed by destination element size
   (0 = 8, 1 = 16, 2 = 32 bits).  */
static GenOpFunc *gen_neon_narrow_sats[3] = {
    gen_op_neon_narrow_sat_s8,
    gen_op_neon_narrow_sat_s16,
    gen_op_neon_narrow_sat_s32
};
2964

    
2965
/* Emit a vector add of T0 and T1 for the given element size.
   Returns nonzero for an unsupported size (64-bit elements).  */
static inline int gen_neon_add(int size)
{
    if (size == 0)
        gen_op_neon_add_u8();
    else if (size == 1)
        gen_op_neon_add_u16();
    else if (size == 2)
        gen_op_addl_T0_T1();
    else
        return 1;
    return 0;
}
2975

    
2976
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_op_neon_pmax_s32  gen_op_neon_max_s32
#define gen_op_neon_pmax_u32  gen_op_neon_max_u32
#define gen_op_neon_pmin_s32  gen_op_neon_min_s32
#define gen_op_neon_pmin_u32  gen_op_neon_min_u32

/* Dispatch the integer Neon helper gen_op_neon_<name>_<sign><width>()
   selected by the variables `size' (0=8, 1=16, 2=32-bit elements) and
   `u' (unsigned) from the enclosing scope.  64-bit elements (size == 3)
   make the enclosing function return 1 (invalid instruction), so this
   must only be used inside a function returning int.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_op_neon_##name##_s8(); break; \
    case 1: gen_op_neon_##name##_u8(); break; \
    case 2: gen_op_neon_##name##_s16(); break; \
    case 3: gen_op_neon_##name##_u16(); break; \
    case 4: gen_op_neon_##name##_s32(); break; \
    case 5: gen_op_neon_##name##_u32(); break; \
    default: return 1; \
    }} while (0)
2992

    
2993
static inline void
2994
gen_neon_movl_scratch_T0(int scratch)
2995
{
2996
  uint32_t offset;
2997

    
2998
  offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2999
  gen_op_neon_setreg_T0(offset);
3000
}
3001

    
3002
static inline void
3003
gen_neon_movl_scratch_T1(int scratch)
3004
{
3005
  uint32_t offset;
3006

    
3007
  offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3008
  gen_op_neon_setreg_T1(offset);
3009
}
3010

    
3011
static inline void
3012
gen_neon_movl_T0_scratch(int scratch)
3013
{
3014
  uint32_t offset;
3015

    
3016
  offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3017
  gen_op_neon_getreg_T0(offset);
3018
}
3019

    
3020
static inline void
3021
gen_neon_movl_T1_scratch(int scratch)
3022
{
3023
  uint32_t offset;
3024

    
3025
  offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3026
  gen_op_neon_getreg_T1(offset);
3027
}
3028

    
3029
/* Unsigned widen of a 32-bit element: the low word is already in T0, so
   only the high word (T1) needs zeroing.  */
static inline void gen_op_neon_widen_u32(void)
{
    gen_op_movl_T1_im(0);
}
3033

    
3034
static inline void gen_neon_get_scalar(int size, int reg)
3035
{
3036
    if (size == 1) {
3037
        NEON_GET_REG(T0, reg >> 1, reg & 1);
3038
    } else {
3039
        NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
3040
        if (reg & 1)
3041
            gen_op_neon_dup_low16();
3042
        else
3043
            gen_op_neon_dup_high16();
3044
    }
3045
}
3046

    
3047
static void gen_neon_unzip(int reg, int q, int tmp, int size)
3048
{
3049
    int n;
3050

    
3051
    for (n = 0; n < q + 1; n += 2) {
3052
        NEON_GET_REG(T0, reg, n);
3053
        NEON_GET_REG(T0, reg, n + n);
3054
        switch (size) {
3055
        case 0: gen_op_neon_unzip_u8(); break;
3056
        case 1: gen_op_neon_zip_u16(); break; /* zip and unzip are the same.  */
3057
        case 2: /* no-op */; break;
3058
        default: abort();
3059
        }
3060
        gen_neon_movl_scratch_T0(tmp + n);
3061
        gen_neon_movl_scratch_T1(tmp + n + 1);
3062
    }
3063
}
3064

    
3065
/* Decode table for the "type" field of VLDn/VSTn (multiple elements),
   indexed by the 4-bit type value (0..10; larger values are rejected by
   the caller).  Made const: the table is read-only.  */
static const struct {
    int nregs;      /* number of D registers transferred */
    int interleave; /* element interleave factor */
    int spacing;    /* spacing between the D registers */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3082

    
3083
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.
   T1 is used as the memory address throughout; elements narrower than a
   word are assembled in / extracted from T2 via insert_elt/extract_elt.
   Register writeback (rm != 15) is handled at the end using the byte
   count accumulated in `stride'.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    uint32_t mask;
    int n;

    if (!vfp_enabled(env))
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;   /* base address register */
    rm = insn & 0xf;           /* writeback/index register (15 = none) */
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10 || size == 3)
            return 1;
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        gen_movl_T1_reg(s, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* For interleaved layouts each register restarts from the
               base at its element offset.  */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                gen_movl_T1_reg(s, rn);
                gen_op_addl_T1_im((1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                gen_movl_T1_reg(s, rn);
                gen_op_addl_T1_im(1 << size);
            }
            /* Two 32-bit passes per D register.  */
            for (pass = 0; pass < 2; pass++) {
                if (size == 2) {
                    /* Whole-word elements transfer directly via T0.  */
                    if (load) {
                        gen_ldst(ldl, s);
                        NEON_SET_REG(T0, rd, pass);
                    } else {
                        NEON_GET_REG(T0, rd, pass);
                        gen_ldst(stl, s);
                    }
                    gen_op_addl_T1_im(stride);
                } else if (size == 1) {
                    /* Two halfword elements packed into T2.  */
                    if (load) {
                        gen_ldst(lduw, s);
                        gen_op_addl_T1_im(stride);
                        gen_op_movl_T2_T0();
                        gen_ldst(lduw, s);
                        gen_op_addl_T1_im(stride);
                        gen_op_neon_insert_elt(16, 0xffff);
                        NEON_SET_REG(T2, rd, pass);
                    } else {
                        NEON_GET_REG(T2, rd, pass);
                        gen_op_movl_T0_T2();
                        gen_ldst(stw, s);
                        gen_op_addl_T1_im(stride);
                        gen_op_neon_extract_elt(16, 0xffff0000);
                        gen_ldst(stw, s);
                        gen_op_addl_T1_im(stride);
                    }
                } else /* size == 0 */ {
                    /* Four byte elements packed into T2, mask tracking
                       the byte lane being inserted/extracted.  */
                    if (load) {
                        mask = 0xff;
                        for (n = 0; n < 4; n++) {
                            gen_ldst(ldub, s);
                            gen_op_addl_T1_im(stride);
                            if (n == 0) {
                                gen_op_movl_T2_T0();
                            } else {
                                gen_op_neon_insert_elt(n * 8, ~mask);
                            }
                            mask <<= 8;
                        }
                        NEON_SET_REG(T2, rd, pass);
                    } else {
                        NEON_GET_REG(T2, rd, pass);
                        mask = 0xff;
                        for (n = 0; n < 4; n++) {
                            if (n == 0) {
                                gen_op_movl_T0_T2();
                            } else {
                                gen_op_neon_extract_elt(n * 8, mask);
                            }
                            gen_ldst(stb, s);
                            gen_op_addl_T1_im(stride);
                            mask <<= 8;
                        }
                    }
                }
            }
            rd += neon_ls_element_type[op].spacing;
        }
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            if (!load)
                return 1;
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;
            stride = (insn & (1 << 5)) ? 2 : 1;
            gen_movl_T1_reg(s, rn);
            for (reg = 0; reg < nregs; reg++) {
                switch (size) {
                case 0:
                    gen_ldst(ldub, s);
                    gen_op_neon_dup_u8(0);
                    break;
                case 1:
                    gen_ldst(lduw, s);
                    gen_op_neon_dup_low16();
                    break;
                case 2:
                    gen_ldst(ldl, s);
                    break;
                case 3:
                    return 1;
                }
                gen_op_addl_T1_im(1 << size);
                /* Replicate the loaded value into both halves of Dd.  */
                NEON_SET_REG(T0, rd, 0);
                NEON_SET_REG(T0, rd, 1);
                rd += stride;
            }
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                mask = 0xff << shift;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                mask = shift ? 0xffff0000 : 0xffff;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                mask = 0xffffffff;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            gen_movl_T1_reg(s, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    if (size != 2) {
                        /* Fetch the existing register contents so only the
                           selected lane is replaced.  */
                        NEON_GET_REG(T2, rd, pass);
                    }
                    switch (size) {
                    case 0:
                        gen_ldst(ldub, s);
                        break;
                    case 1:
                        gen_ldst(lduw, s);
                        break;
                    case 2:
                        gen_ldst(ldl, s);
                        NEON_SET_REG(T0, rd, pass);
                        break;
                    }
                    if (size != 2) {
                        gen_op_neon_insert_elt(shift, ~mask);
                        NEON_SET_REG(T0, rd, pass);
                    }
                } else { /* Store */
                    if (size == 2) {
                        NEON_GET_REG(T0, rd, pass);
                    } else {
                        NEON_GET_REG(T2, rd, pass);
                        gen_op_neon_extract_elt(shift, mask);
                    }
                    switch (size) {
                    case 0:
                        gen_ldst(stb, s);
                        break;
                    case 1:
                        gen_ldst(stw, s);
                        break;
                    case 2:
                        gen_ldst(stl, s);
                        break;
                    }
                }
                rd += stride;
                gen_op_addl_T1_im(1 << size);
            }
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        /* Base register writeback: post-increment by the transfer size
           (rm == 13) or by the index register rm.  */
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            dead_tmp(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3303

    
3304
/* Translate a NEON data processing instruction.  Return nonzero if the
3305
   instruction is invalid.
3306
   In general we process vectors in 32-bit chunks.  This means we can reuse
3307
   some of the scalar ops, and hopefully the code generated for 32-bit
3308
   hosts won't be too awful.  The downside is that the few 64-bit operations
3309
   (mainly shifts) get complicated.  */
3310

    
3311
static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
3312
{
3313
    int op;
3314
    int q;
3315
    int rd, rn, rm;
3316
    int size;
3317
    int shift;
3318
    int pass;
3319
    int count;
3320
    int pairwise;
3321
    int u;
3322
    int n;
3323
    uint32_t imm;
3324

    
3325
    if (!vfp_enabled(env))
3326
      return 1;
3327
    q = (insn & (1 << 6)) != 0;
3328
    u = (insn >> 24) & 1;
3329
    VFP_DREG_D(rd, insn);
3330
    VFP_DREG_N(rn, insn);
3331
    VFP_DREG_M(rm, insn);
3332
    size = (insn >> 20) & 3;
3333
    if ((insn & (1 << 23)) == 0) {
3334
        /* Three register same length.  */
3335
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
3336
        if (size == 3 && (op == 1 || op == 5 || op == 16)) {
3337
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
3338
                NEON_GET_REG(T0, rm, pass * 2);
3339
                NEON_GET_REG(T1, rm, pass * 2 + 1);
3340
                gen_neon_movl_scratch_T0(0);
3341
                gen_neon_movl_scratch_T1(1);
3342
                NEON_GET_REG(T0, rn, pass * 2);
3343
                NEON_GET_REG(T1, rn, pass * 2 + 1);
3344
                switch (op) {
3345
                case 1: /* VQADD */
3346
                    if (u) {
3347
                        gen_op_neon_addl_saturate_u64();
3348
                    } else {
3349
                        gen_op_neon_addl_saturate_s64();
3350
                    }
3351
                    break;
3352
                case 5: /* VQSUB */
3353
                    if (u) {
3354
                        gen_op_neon_subl_saturate_u64();
3355
                    } else {
3356
                        gen_op_neon_subl_saturate_s64();
3357
                    }
3358
                    break;
3359
                case 16:
3360
                    if (u) {
3361
                        gen_op_neon_subl_u64();
3362
                    } else {
3363
                        gen_op_neon_addl_u64();
3364
                    }
3365
                    break;
3366
                default:
3367
                    abort();
3368
                }
3369
                NEON_SET_REG(T0, rd, pass * 2);
3370
                NEON_SET_REG(T1, rd, pass * 2 + 1);
3371
            }
3372
            return 0;
3373
        }
3374
        switch (op) {
3375
        case 8: /* VSHL */
3376
        case 9: /* VQSHL */
3377
        case 10: /* VRSHL */
3378
        case 11: /* VQSHL */
3379
            /* Shift operations have Rn and Rm reversed.  */
3380
            {
3381
                int tmp;
3382
                tmp = rn;
3383
                rn = rm;
3384
                rm = tmp;
3385
                pairwise = 0;
3386
            }
3387
            break;
3388
        case 20: /* VPMAX */
3389
        case 21: /* VPMIN */
3390
        case 23: /* VPADD */
3391
            pairwise = 1;
3392
            break;
3393
        case 26: /* VPADD (float) */
3394
            pairwise = (u && size < 2);
3395
            break;
3396
        case 30: /* VPMIN/VPMAX (float) */
3397
            pairwise = u;
3398
            break;
3399
        default:
3400
            pairwise = 0;
3401
            break;
3402
        }
3403
        for (pass = 0; pass < (q ? 4 : 2); pass++) {
3404

    
3405
        if (pairwise) {
3406
            /* Pairwise.  */
3407
            if (q)
3408
                n = (pass & 1) * 2;
3409
            else
3410
                n = 0;
3411
            if (pass < q + 1) {
3412
                NEON_GET_REG(T0, rn, n);
3413
                NEON_GET_REG(T1, rn, n + 1);
3414
            } else {
3415
                NEON_GET_REG(T0, rm, n);
3416
                NEON_GET_REG(T1, rm, n + 1);
3417
            }
3418
        } else {
3419
            /* Elementwise.  */
3420
            NEON_GET_REG(T0, rn, pass);
3421
            NEON_GET_REG(T1, rm, pass);
3422
        }
3423
        switch (op) {
3424
        case 0: /* VHADD */
3425
            GEN_NEON_INTEGER_OP(hadd);
3426
            break;
3427
        case 1: /* VQADD */
3428
            switch (size << 1| u) {
3429
            case 0: gen_op_neon_qadd_s8(); break;
3430
            case 1: gen_op_neon_qadd_u8(); break;
3431
            case 2: gen_op_neon_qadd_s16(); break;
3432
            case 3: gen_op_neon_qadd_u16(); break;
3433
            case 4: gen_op_addl_T0_T1_saturate(); break;
3434
            case 5: gen_op_addl_T0_T1_usaturate(); break;
3435
            default: abort();
3436
            }
3437
            break;
3438
        case 2: /* VRHADD */
3439
            GEN_NEON_INTEGER_OP(rhadd);
3440
            break;
3441
        case 3: /* Logic ops.  */
3442
            switch ((u << 2) | size) {
3443
            case 0: /* VAND */
3444
                gen_op_andl_T0_T1();
3445
                break;
3446
            case 1: /* BIC */
3447
                gen_op_bicl_T0_T1();
3448
                break;
3449
            case 2: /* VORR */
3450
                gen_op_orl_T0_T1();
3451
                break;
3452
            case 3: /* VORN */
3453
                gen_op_notl_T1();
3454
                gen_op_orl_T0_T1();
3455
                break;
3456
            case 4: /* VEOR */
3457
                gen_op_xorl_T0_T1();
3458
                break;
3459
            case 5: /* VBSL */
3460
                NEON_GET_REG(T2, rd, pass);
3461
                gen_op_neon_bsl();
3462
                break;
3463
            case 6: /* VBIT */
3464
                NEON_GET_REG(T2, rd, pass);
3465
                gen_op_neon_bit();
3466
                break;
3467
            case 7: /* VBIF */
3468
                NEON_GET_REG(T2, rd, pass);
3469
                gen_op_neon_bif();
3470
                break;
3471
            }
3472
            break;
3473
        case 4: /* VHSUB */
3474
            GEN_NEON_INTEGER_OP(hsub);
3475
            break;
3476
        case 5: /* VQSUB */
3477
            switch ((size << 1) | u) {
3478
            case 0: gen_op_neon_qsub_s8(); break;
3479
            case 1: gen_op_neon_qsub_u8(); break;
3480
            case 2: gen_op_neon_qsub_s16(); break;
3481
            case 3: gen_op_neon_qsub_u16(); break;
3482
            case 4: gen_op_subl_T0_T1_saturate(); break;
3483
            case 5: gen_op_subl_T0_T1_usaturate(); break;
3484
            default: abort();
3485
            }
3486
            break;
3487
        case 6: /* VCGT */
3488
            GEN_NEON_INTEGER_OP(cgt);
3489
            break;
3490
        case 7: /* VCGE */
3491
            GEN_NEON_INTEGER_OP(cge);
3492
            break;
3493
        case 8: /* VSHL */
3494
            switch ((size << 1) | u) {
3495
            case 0: gen_op_neon_shl_s8(); break;
3496
            case 1: gen_op_neon_shl_u8(); break;
3497
            case 2: gen_op_neon_shl_s16(); break;
3498
            case 3: gen_op_neon_shl_u16(); break;
3499
            case 4: gen_op_neon_shl_s32(); break;
3500
            case 5: gen_op_neon_shl_u32(); break;
3501
#if 0
3502
            /* ??? Implementing these is tricky because the vector ops work
3503
               on 32-bit pieces.  */
3504
            case 6: gen_op_neon_shl_s64(); break;
3505
            case 7: gen_op_neon_shl_u64(); break;
3506
#else
3507
            case 6: case 7: cpu_abort(env, "VSHL.64 not implemented");
3508
#endif
3509
            }
3510
            break;
3511
        case 9: /* VQSHL */
3512
            switch ((size << 1) | u) {
3513
            case 0: gen_op_neon_qshl_s8(); break;
3514
            case 1: gen_op_neon_qshl_u8(); break;
3515
            case 2: gen_op_neon_qshl_s16(); break;
3516
            case 3: gen_op_neon_qshl_u16(); break;
3517
            case 4: gen_op_neon_qshl_s32(); break;
3518
            case 5: gen_op_neon_qshl_u32(); break;
3519
#if 0
3520
            /* ??? Implementing these is tricky because the vector ops work
3521
               on 32-bit pieces.  */
3522
            case 6: gen_op_neon_qshl_s64(); break;
3523
            case 7: gen_op_neon_qshl_u64(); break;
3524
#else
3525
            case 6: case 7: cpu_abort(env, "VQSHL.64 not implemented");
3526
#endif
3527
            }
3528
            break;
3529
        case 10: /* VRSHL */
3530
            switch ((size << 1) | u) {
3531
            case 0: gen_op_neon_rshl_s8(); break;
3532
            case 1: gen_op_neon_rshl_u8(); break;
3533
            case 2: gen_op_neon_rshl_s16(); break;
3534
            case 3: gen_op_neon_rshl_u16(); break;
3535
            case 4: gen_op_neon_rshl_s32(); break;
3536
            case 5: gen_op_neon_rshl_u32(); break;
3537
#if 0
3538
            /* ??? Implementing these is tricky because the vector ops work
3539
               on 32-bit pieces.  */
3540
            case 6: gen_op_neon_rshl_s64(); break;
3541
            case 7: gen_op_neon_rshl_u64(); break;
3542
#else
3543
            case 6: case 7: cpu_abort(env, "VRSHL.64 not implemented");
3544
#endif
3545
            }
3546
            break;
3547
        case 11: /* VQRSHL */
3548
            switch ((size << 1) | u) {
3549
            case 0: gen_op_neon_qrshl_s8(); break;
3550
            case 1: gen_op_neon_qrshl_u8(); break;
3551
            case 2: gen_op_neon_qrshl_s16(); break;
3552
            case 3: gen_op_neon_qrshl_u16(); break;
3553
            case 4: gen_op_neon_qrshl_s32(); break;
3554
            case 5: gen_op_neon_qrshl_u32(); break;
3555
#if 0
3556
            /* ??? Implementing these is tricky because the vector ops work
3557
               on 32-bit pieces.  */
3558
            case 6: gen_op_neon_qrshl_s64(); break;
3559
            case 7: gen_op_neon_qrshl_u64(); break;
3560
#else
3561
            case 6: case 7: cpu_abort(env, "VQRSHL.64 not implemented");
3562
#endif
3563
            }
3564
            break;
3565
        case 12: /* VMAX */
3566
            GEN_NEON_INTEGER_OP(max);
3567
            break;
3568
        case 13: /* VMIN */
3569
            GEN_NEON_INTEGER_OP(min);
3570
            break;
3571
        case 14: /* VABD */
3572
            GEN_NEON_INTEGER_OP(abd);
3573
            break;
3574
        case 15: /* VABA */
3575
            GEN_NEON_INTEGER_OP(abd);
3576
            NEON_GET_REG(T1, rd, pass);
3577
            gen_neon_add(size);
3578
            break;
3579
        case 16:
3580
            if (!u) { /* VADD */
3581
                if (gen_neon_add(size))
3582
                    return 1;
3583
            } else { /* VSUB */
3584
                switch (size) {
3585
                case 0: gen_op_neon_sub_u8(); break;
3586
                case 1: gen_op_neon_sub_u16(); break;
3587
                case 2: gen_op_subl_T0_T1(); break;
3588
                default: return 1;
3589
                }
3590
            }
3591
            break;
3592
        case 17:
3593
            if (!u) { /* VTST */
3594
                switch (size) {
3595
                case 0: gen_op_neon_tst_u8(); break;
3596
                case 1: gen_op_neon_tst_u16(); break;
3597
                case 2: gen_op_neon_tst_u32(); break;
3598
                default: return 1;
3599
                }
3600
            } else { /* VCEQ */
3601
                switch (size) {
3602
                case 0: gen_op_neon_ceq_u8(); break;
3603
                case 1: gen_op_neon_ceq_u16(); break;
3604
                case 2: gen_op_neon_ceq_u32(); break;
3605
                default: return 1;
3606
                }
3607
            }
3608
            break;
3609
        case 18: /* Multiply.  */
3610
            switch (size) {
3611
            case 0: gen_op_neon_mul_u8(); break;
3612
            case 1: gen_op_neon_mul_u16(); break;
3613
            case 2: gen_op_mul_T0_T1(); break;
3614
            default: return 1;
3615
            }
3616
            NEON_GET_REG(T1, rd, pass);
3617
            if (u) { /* VMLS */
3618
                switch (size) {
3619
                case 0: gen_op_neon_rsb_u8(); break;
3620
                case 1: gen_op_neon_rsb_u16(); break;
3621
                case 2: gen_op_rsbl_T0_T1(); break;
3622
                default: return 1;
3623
                }
3624
            } else { /* VMLA */
3625
                gen_neon_add(size);
3626
            }
3627
            break;
3628
        case 19: /* VMUL */
3629
            if (u) { /* polynomial */
3630
                gen_op_neon_mul_p8();
3631
            } else { /* Integer */
3632
                switch (size) {
3633
                case 0: gen_op_neon_mul_u8(); break;
3634
                case 1: gen_op_neon_mul_u16(); break;
3635
                case 2: gen_op_mul_T0_T1(); break;
3636
                default: return 1;
3637
                }
3638
            }
3639
            break;
3640
        case 20: /* VPMAX */
3641
            GEN_NEON_INTEGER_OP(pmax);
3642
            break;
3643
        case 21: /* VPMIN */
3644
            GEN_NEON_INTEGER_OP(pmin);
3645
            break;
3646
        case 22: /* Multiply high.  */
3647
            if (!u) { /* VQDMULH */
3648
                switch (size) {
3649
                case 1: gen_op_neon_qdmulh_s16(); break;
3650
                case 2: gen_op_neon_qdmulh_s32(); break;
3651
                default: return 1;
3652
                }
3653
            } else { /* VQRDMULH */
3654
                switch (size) {
3655
                case 1: gen_op_neon_qrdmulh_s16(); break;
3656
                case 2: gen_op_neon_qrdmulh_s32(); break;
3657
                default: return 1;
3658
                }
3659
            }
3660
            break;
3661
        case 23: /* VPADD */
3662
            if (u)
3663
                return 1;
3664
            switch (size) {
3665
            case 0: gen_op_neon_padd_u8(); break;
3666
            case 1: gen_op_neon_padd_u16(); break;
3667
            case 2: gen_op_addl_T0_T1(); break;
3668
            default: return 1;
3669
            }
3670
            break;
3671
        case 26: /* Floating point arithmetic.  */
3672
            switch ((u << 2) | size) {
3673
            case 0: /* VADD */
3674
                gen_op_neon_add_f32();
3675
                break;
3676
            case 2: /* VSUB */
3677
                gen_op_neon_sub_f32();
3678
                break;
3679
            case 4: /* VPADD */
3680
                gen_op_neon_add_f32();
3681
                break;
3682
            case 6: /* VABD */
3683
                gen_op_neon_abd_f32();
3684
                break;
3685
            default:
3686
                return 1;
3687
            }
3688
            break;
3689
        case 27: /* Float multiply.  */
3690
            gen_op_neon_mul_f32();
3691
            if (!u) {
3692
                NEON_GET_REG(T1, rd, pass);
3693
                if (size == 0) {
3694
                    gen_op_neon_add_f32();
3695
                } else {
3696
                    gen_op_neon_rsb_f32();
3697
                }
3698
            }
3699
            break;
3700
        case 28: /* Float compare.  */
3701
            if (!u) {
3702
                gen_op_neon_ceq_f32();
3703
            } else {
3704
                if (size == 0)
3705
                    gen_op_neon_cge_f32();
3706
                else
3707
                    gen_op_neon_cgt_f32();
3708
            }
3709
            break;
3710
        case 29: /* Float compare absolute.  */
3711
            if (!u)
3712
                return 1;
3713
            if (size == 0)
3714
                gen_op_neon_acge_f32();
3715
            else
3716
                gen_op_neon_acgt_f32();
3717
            break;
3718
        case 30: /* Float min/max.  */
3719
            if (size == 0)
3720
                gen_op_neon_max_f32();
3721
            else
3722
                gen_op_neon_min_f32();
3723
            break;
3724
        case 31:
3725
            if (size == 0)
3726
                gen_op_neon_recps_f32();
3727
            else
3728
                gen_op_neon_rsqrts_f32();
3729
            break;
3730
        default:
3731
            abort();
3732
        }
3733
        /* Save the result.  For elementwise operations we can put it
3734
           straight into the destination register.  For pairwise operations
3735
           we have to be careful to avoid clobbering the source operands.  */
3736
        if (pairwise && rd == rm) {
3737
            gen_neon_movl_scratch_T0(pass);
3738
        } else {
3739
            NEON_SET_REG(T0, rd, pass);
3740
        }
3741

    
3742
        } /* for pass */
3743
        if (pairwise && rd == rm) {
3744
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
3745
                gen_neon_movl_T0_scratch(pass);
3746
                NEON_SET_REG(T0, rd, pass);
3747
            }
3748
        }
3749
    } else if (insn & (1 << 4)) {
3750
        if ((insn & 0x00380080) != 0) {
3751
            /* Two registers and shift.  */
3752
            op = (insn >> 8) & 0xf;
3753
            if (insn & (1 << 7)) {
3754
                /* 64-bit shift.   */
3755
                size = 3;
3756
            } else {
3757
                size = 2;
3758
                while ((insn & (1 << (size + 19))) == 0)
3759
                    size--;
3760
            }
3761
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
3762
            /* To avoid excessive duplication of ops we implement shift
3763
               by immediate using the variable shift operations.  */
3764
            if (op < 8) {
3765
                /* Shift by immediate:
3766
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
3767
                /* Right shifts are encoded as N - shift, where N is the
3768
                   element size in bits.  */
3769
                if (op <= 4)
3770
                    shift = shift - (1 << (size + 3));
3771
                else
3772
                    shift++;
3773
                if (size == 3) {
3774
                    count = q + 1;
3775
                } else {
3776
                    count = q ? 4: 2;
3777
                }
3778
                switch (size) {
3779
                case 0:
3780
                    imm = (uint8_t) shift;
3781
                    imm |= imm << 8;
3782
                    imm |= imm << 16;
3783
                    break;
3784
                case 1:
3785
                    imm = (uint16_t) shift;
3786
                    imm |= imm << 16;
3787
                    break;
3788
                case 2:
3789
                case 3:
3790
                    imm = shift;
3791
                    break;
3792
                default:
3793
                    abort();
3794
                }
3795

    
3796
                for (pass = 0; pass < count; pass++) {
3797
                    if (size < 3) {
3798
                        /* Operands in T0 and T1.  */
3799
                        gen_op_movl_T1_im(imm);
3800
                        NEON_GET_REG(T0, rm, pass);
3801
                    } else {
3802
                        /* Operands in {T0, T1} and env->vfp.scratch.  */
3803
                        gen_op_movl_T0_im(imm);
3804
                        gen_neon_movl_scratch_T0(0);
3805
                        gen_op_movl_T0_im((int32_t)imm >> 31);
3806
                        gen_neon_movl_scratch_T0(1);
3807
                        NEON_GET_REG(T0, rm, pass * 2);
3808
                        NEON_GET_REG(T1, rm, pass * 2 + 1);
3809
                    }
3810

    
3811
                    if (gen_neon_shift_im[op][u][size] == NULL)
3812
                        return 1;
3813
                    gen_neon_shift_im[op][u][size]();
3814

    
3815
                    if (op == 1 || op == 3) {
3816
                        /* Accumulate.  */
3817
                        if (size == 3) {
3818
                            gen_neon_movl_scratch_T0(0);
3819
                            gen_neon_movl_scratch_T1(1);
3820
                            NEON_GET_REG(T0, rd, pass * 2);
3821
                            NEON_GET_REG(T1, rd, pass * 2 + 1);
3822
                            gen_op_neon_addl_u64();
3823
                        } else {
3824
                            NEON_GET_REG(T1, rd, pass);
3825
                            gen_neon_add(size);
3826
                        }
3827
                    } else if (op == 4 || (op == 5 && u)) {
3828
                        /* Insert */
3829
                        if (size == 3) {
3830
                            cpu_abort(env, "VS[LR]I.64 not implemented");
3831
                        }
3832
                        switch (size) {
3833
                        case 0:
3834
                            if (op == 4)
3835
                                imm = 0xff >> -shift;
3836
                            else
3837
                                imm = (uint8_t)(0xff << shift);
3838
                            imm |= imm << 8;
3839
                            imm |= imm << 16;
3840
                            break;
3841
                        case 1:
3842
                            if (op == 4)
3843
                                imm = 0xffff >> -shift;
3844
                            else
3845
                                imm = (uint16_t)(0xffff << shift);
3846
                            imm |= imm << 16;
3847
                            break;
3848
                        case 2:
3849
                            if (op == 4)
3850
                                imm = 0xffffffffu >> -shift;
3851
                            else
3852
                                imm = 0xffffffffu << shift;
3853
                            break;
3854
                        default:
3855
                            abort();
3856
                        }
3857
                        NEON_GET_REG(T1, rd, pass);
3858
                        gen_op_movl_T2_im(imm);
3859
                        gen_op_neon_bsl();
3860
                    }
3861
                    if (size == 3) {
3862
                        NEON_SET_REG(T0, rd, pass * 2);
3863
                        NEON_SET_REG(T1, rd, pass * 2 + 1);
3864
                    } else {
3865
                        NEON_SET_REG(T0, rd, pass);
3866
                    }
3867
                } /* for pass */
3868
            } else if (op < 10) {
3869
                /* Shift by immediate and narrow:
3870
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
3871
                shift = shift - (1 << (size + 3));
3872
                size++;
3873
                if (size == 3) {
3874
                    count = q + 1;
3875
                } else {
3876
                    count = q ? 4: 2;
3877
                }
3878
                switch (size) {
3879
                case 1:
3880
                    imm = (uint16_t) shift;
3881
                    imm |= imm << 16;
3882
                    break;
3883
                case 2:
3884
                case 3:
3885
                    imm = shift;
3886
                    break;
3887
                default:
3888
                    abort();
3889
                }
3890

    
3891
                /* Processing MSB first means we need to do less shuffling at
3892
                   the end.  */
3893
                for (pass =  count - 1; pass >= 0; pass--) {
3894
                    /* Avoid clobbering the second operand before it has been
3895
                       written.  */
3896
                    n = pass;
3897
                    if (rd == rm)
3898
                        n ^= (count - 1);
3899
                    else
3900
                        n = pass;
3901

    
3902
                    if (size < 3) {
3903
                        /* Operands in T0 and T1.  */
3904
                        gen_op_movl_T1_im(imm);
3905
                        NEON_GET_REG(T0, rm, n);
3906
                    } else {
3907
                        /* Operands in {T0, T1} and env->vfp.scratch.  */
3908
                        gen_op_movl_T0_im(imm);
3909
                        gen_neon_movl_scratch_T0(0);
3910
                        gen_op_movl_T0_im((int32_t)imm >> 31);
3911
                        gen_neon_movl_scratch_T0(1);
3912
                        NEON_GET_REG(T0, rm, n * 2);
3913
                        NEON_GET_REG(T1, rm, n * 2 + 1);
3914
                    }
3915

    
3916
                    gen_neon_shift_im_narrow[q][u][size - 1]();
3917

    
3918
                    if (size < 3 && (pass & 1) == 0) {
3919
                        gen_neon_movl_scratch_T0(0);
3920
                    } else {
3921
                        uint32_t offset;
3922

    
3923
                        if (size < 3)
3924
                            gen_neon_movl_T1_scratch(0);
3925

    
3926
                        if (op == 8 && !u) {
3927
                            gen_neon_narrow[size - 1]();
3928
                        } else {
3929
                            if (op == 8)
3930
                                gen_neon_narrow_sats[size - 2]();
3931
                            else
3932
                                gen_neon_narrow_satu[size - 1]();
3933
                        }
3934
                        if (size == 3)
3935
                            offset = neon_reg_offset(rd, n);
3936
                        else
3937
                            offset = neon_reg_offset(rd, n >> 1);
3938
                        gen_op_neon_setreg_T0(offset);
3939
                    }
3940
                } /* for pass */
3941
            } else if (op == 10) {
3942
                /* VSHLL */
3943
                if (q)
3944
                    return 1;
3945
                for (pass = 0; pass < 2; pass++) {
3946
                    /* Avoid clobbering the input operand.  */
3947
                    if (rd == rm)
3948
                        n = 1 - pass;
3949
                    else
3950
                        n = pass;
3951

    
3952
                    NEON_GET_REG(T0, rm, n);
3953
                    GEN_NEON_INTEGER_OP(widen);
3954
                    if (shift != 0) {
3955
                        /* The shift is less than the width of the source
3956
                           type, so in some cases we can just
3957
                           shift the whole register.  */
3958
                        if (size == 1 || (size == 0 && u)) {
3959
                            gen_op_shll_T0_im(shift);
3960
                            gen_op_shll_T1_im(shift);
3961
                        } else {
3962
                            switch (size) {
3963
                            case 0: gen_op_neon_shll_u16(shift); break;
3964
                            case 2: gen_op_neon_shll_u64(shift); break;
3965
                            default: abort();
3966
                            }
3967
                        }
3968
                    }
3969
                    NEON_SET_REG(T0, rd, n * 2);
3970
                    NEON_SET_REG(T1, rd, n * 2 + 1);
3971
                }
3972
            } else if (op == 15 || op == 16) {
3973
                /* VCVT fixed-point.  */
3974
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
3975
                    gen_op_vfp_getreg_F0s(neon_reg_offset(rm, pass));
3976
                    if (op & 1) {
3977
                        if (u)
3978
                            gen_op_vfp_ultos(shift);
3979
                        else
3980
                            gen_op_vfp_sltos(shift);
3981
                    } else {
3982
                        if (u)
3983
                            gen_op_vfp_touls(shift);
3984
                        else
3985
                            gen_op_vfp_tosls(shift);
3986
                    }
3987
                    gen_op_vfp_setreg_F0s(neon_reg_offset(rd, pass));
3988
                }
3989
            } else {
3990
                return 1;
3991
            }
3992
        } else { /* (insn & 0x00380080) == 0 */
3993
            int invert;
3994

    
3995
            op = (insn >> 8) & 0xf;
3996
            /* One register and immediate.  */
3997
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
3998
            invert = (insn & (1 << 5)) != 0;
3999
            switch (op) {
4000
            case 0: case 1:
4001
                /* no-op */
4002
                break;
4003
            case 2: case 3:
4004
                imm <<= 8;
4005
                break;
4006
            case 4: case 5:
4007
                imm <<= 16;
4008
                break;
4009
            case 6: case 7:
4010
                imm <<= 24;
4011
                break;
4012
            case 8: case 9:
4013
                imm |= imm << 16;
4014
                break;
4015
            case 10: case 11:
4016
                imm = (imm << 8) | (imm << 24);
4017
                break;
4018
            case 12:
4019
                imm = (imm << 8) | 0xff;
4020
                break;
4021
            case 13:
4022
                imm = (imm << 16) | 0xffff;
4023
                break;
4024
            case 14:
4025
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
4026
                if (invert)
4027
                    imm = ~imm;
4028
                break;
4029
            case 15:
4030
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4031
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4032
                break;
4033
            }
4034
            if (invert)
4035
                imm = ~imm;
4036

    
4037
            if (op != 14 || !invert)
4038
                gen_op_movl_T1_im(imm);
4039

    
4040
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
4041
                if (op & 1 && op < 12) {
4042
                    NEON_GET_REG(T0, rd, pass);
4043
                    if (invert) {
4044
                        /* The immediate value has already been inverted, so
4045
                           BIC becomes AND.  */
4046
                        gen_op_andl_T0_T1();
4047
                    } else {
4048
                        gen_op_orl_T0_T1();
4049
                    }
4050
                    NEON_SET_REG(T0, rd, pass);
4051
                } else {
4052
                    if (op == 14 && invert) {
4053
                        uint32_t tmp;
4054
                        tmp = 0;
4055
                        for (n = 0; n < 4; n++) {
4056
                            if (imm & (1 << (n + (pass & 1) * 4)))
4057
                                tmp |= 0xff << (n * 8);
4058
                        }
4059
                        gen_op_movl_T1_im(tmp);
4060
                    }
4061
                    /* VMOV, VMVN.  */
4062
                    NEON_SET_REG(T1, rd, pass);
4063
                }
4064
            }
4065
        }
4066
    } else { /* (insn & 0x00800010 == 0x00800010) */
4067
        if (size != 3) {
4068
            op = (insn >> 8) & 0xf;
4069
            if ((insn & (1 << 6)) == 0) {
4070
                /* Three registers of different lengths.  */
4071
                int src1_wide;
4072
                int src2_wide;
4073
                int prewiden;
4074
                /* prewiden, src1_wide, src2_wide */
4075
                static const int neon_3reg_wide[16][3] = {
4076
                    {1, 0, 0}, /* VADDL */
4077
                    {1, 1, 0}, /* VADDW */
4078
                    {1, 0, 0}, /* VSUBL */
4079
                    {1, 1, 0}, /* VSUBW */
4080
                    {0, 1, 1}, /* VADDHN */
4081
                    {0, 0, 0}, /* VABAL */
4082
                    {0, 1, 1}, /* VSUBHN */
4083
                    {0, 0, 0}, /* VABDL */
4084
                    {0, 0, 0}, /* VMLAL */
4085
                    {0, 0, 0}, /* VQDMLAL */
4086
                    {0, 0, 0}, /* VMLSL */
4087
                    {0, 0, 0}, /* VQDMLSL */
4088
                    {0, 0, 0}, /* Integer VMULL */
4089
                    {0, 0, 0}, /* VQDMULL */
4090
                    {0, 0, 0}  /* Polynomial VMULL */
4091
                };
4092

    
4093
                prewiden = neon_3reg_wide[op][0];
4094
                src1_wide = neon_3reg_wide[op][1];
4095
                src2_wide = neon_3reg_wide[op][2];
4096

    
4097
                /* Avoid overlapping operands.  Wide source operands are
4098
                   always aligned so will never overlap with wide
4099
                   destinations in problematic ways.  */
4100
                if (rd == rm) {
4101
                    NEON_GET_REG(T2, rm, 1);
4102
                } else if (rd == rn) {
4103
                    NEON_GET_REG(T2, rn, 1);
4104
                }
4105
                for (pass = 0; pass < 2; pass++) {
4106
                    /* Load the second operand into env->vfp.scratch.
4107
                       Also widen narrow operands.  */
4108
                    if (pass == 1 && rd == rm) {
4109
                        if (prewiden) {
4110
                            gen_op_movl_T0_T2();
4111
                        } else {
4112
                            gen_op_movl_T1_T2();
4113
                        }
4114
                    } else {
4115
                        if (src2_wide) {
4116
                            NEON_GET_REG(T0, rm, pass * 2);
4117
                            NEON_GET_REG(T1, rm, pass * 2 + 1);
4118
                        } else {
4119
                            if (prewiden) {
4120
                                NEON_GET_REG(T0, rm, pass);
4121
                            } else {
4122
                                NEON_GET_REG(T1, rm, pass);
4123
                            }
4124
                        }
4125
                    }
4126
                    if (prewiden && !src2_wide) {
4127
                        GEN_NEON_INTEGER_OP(widen);
4128
                    }
4129
                    if (prewiden || src2_wide) {
4130
                        gen_neon_movl_scratch_T0(0);
4131
                        gen_neon_movl_scratch_T1(1);
4132
                    }
4133

    
4134
                    /* Load the first operand.  */
4135
                    if (pass == 1 && rd == rn) {
4136
                        gen_op_movl_T0_T2();
4137
                    } else {
4138
                        if (src1_wide) {
4139
                            NEON_GET_REG(T0, rn, pass * 2);
4140
                            NEON_GET_REG(T1, rn, pass * 2 + 1);
4141
                        } else {
4142
                            NEON_GET_REG(T0, rn, pass);
4143
                        }
4144
                    }
4145
                    if (prewiden && !src1_wide) {
4146
                        GEN_NEON_INTEGER_OP(widen);
4147
                    }
4148
                    switch (op) {
4149
                    case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
4150
                        switch (size) {
4151
                        case 0: gen_op_neon_addl_u16(); break;
4152
                        case 1: gen_op_neon_addl_u32(); break;
4153
                        case 2: gen_op_neon_addl_u64(); break;
4154
                        default: abort();
4155
                        }
4156
                        break;
4157
                    case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
4158
                        switch (size) {
4159
                        case 0: gen_op_neon_subl_u16(); break;
4160
                        case 1: gen_op_neon_subl_u32(); break;
4161
                        case 2: gen_op_neon_subl_u64(); break;
4162
                        default: abort();
4163
                        }
4164
                        break;
4165
                    case 5: case 7: /* VABAL, VABDL */
4166
                        switch ((size << 1) | u) {
4167
                        case 0: gen_op_neon_abdl_s16(); break;
4168
                        case 1: gen_op_neon_abdl_u16(); break;
4169
                        case 2: gen_op_neon_abdl_s32(); break;
4170
                        case 3: gen_op_neon_abdl_u32(); break;
4171
                        case 4: gen_op_neon_abdl_s64(); break;
4172
                        case 5: gen_op_neon_abdl_u64(); break;
4173
                        default: abort();
4174
                        }
4175
                        break;
4176
                    case 8: case 9: case 10: case 11: case 12: case 13:
4177
                        /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
4178
                        switch ((size << 1) | u) {
4179
                        case 0: gen_op_neon_mull_s8(); break;
4180
                        case 1: gen_op_neon_mull_u8(); break;
4181
                        case 2: gen_op_neon_mull_s16(); break;
4182
                        case 3: gen_op_neon_mull_u16(); break;
4183
                        case 4: gen_op_imull_T0_T1(); break;
4184
                        case 5: gen_op_mull_T0_T1(); break;
4185
                        default: abort();
4186
                        }
4187
                        break;
4188
                    case 14: /* Polynomial VMULL */
4189
                        cpu_abort(env, "Polynomial VMULL not implemented");
4190

    
4191
                    default: /* 15 is RESERVED.  */
4192
                        return 1;
4193
                    }
4194
                    if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4195
                        /* Accumulate.  */
4196
                        if (op == 10 || op == 11) {
4197
                            switch (size) {
4198
                            case 0: gen_op_neon_negl_u16(); break;
4199
                            case 1: gen_op_neon_negl_u32(); break;
4200
                            case 2: gen_op_neon_negl_u64(); break;
4201
                            default: abort();
4202
                            }
4203
                        }
4204

    
4205
                        gen_neon_movl_scratch_T0(0);
4206
                        gen_neon_movl_scratch_T1(1);
4207

    
4208
                        if (op != 13) {
4209
                            NEON_GET_REG(T0, rd, pass * 2);
4210
                            NEON_GET_REG(T1, rd, pass * 2 + 1);
4211
                        }
4212

    
4213
                        switch (op) {
4214
                        case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
4215
                            switch (size) {
4216
                            case 0: gen_op_neon_addl_u16(); break;
4217
                            case 1: gen_op_neon_addl_u32(); break;
4218
                            case 2: gen_op_neon_addl_u64(); break;
4219
                            default: abort();
4220
                            }
4221
                            break;
4222
                        case 9: case 11: /* VQDMLAL, VQDMLSL */
4223
                            switch (size) {
4224
                            case 1: gen_op_neon_addl_saturate_s32(); break;
4225
                            case 2: gen_op_neon_addl_saturate_s64(); break;
4226
                            default: abort();
4227
                            }
4228
                            /* Fall through.  */
4229
                        case 13: /* VQDMULL */
4230
                            switch (size) {
4231
                            case 1: gen_op_neon_addl_saturate_s32(); break;
4232
                            case 2: gen_op_neon_addl_saturate_s64(); break;
4233
                            default: abort();
4234
                            }
4235
                            break;
4236
                        default:
4237
                            abort();
4238
                        }
4239
                        NEON_SET_REG(T0, rd, pass * 2);
4240
                        NEON_SET_REG(T1, rd, pass * 2 + 1);
4241
                    } else if (op == 4 || op == 6) {
4242
                        /* Narrowing operation.  */
4243
                        if (u) {
4244
                            switch (size) {
4245
                            case 0: gen_op_neon_narrow_high_u8(); break;
4246
                            case 1: gen_op_neon_narrow_high_u16(); break;
4247
                            case 2: gen_op_movl_T0_T1(); break;
4248
                            default: abort();
4249
                            }
4250
                        } else {
4251
                            switch (size) {
4252
                            case 0: gen_op_neon_narrow_high_round_u8(); break;
4253
                            case 1: gen_op_neon_narrow_high_round_u16(); break;
4254
                            case 2: gen_op_neon_narrow_high_round_u32(); break;
4255
                            default: abort();
4256
                            }
4257
                        }
4258
                        NEON_SET_REG(T0, rd, pass);
4259
                    } else {
4260
                        /* Write back the result.  */
4261
                        NEON_SET_REG(T0, rd, pass * 2);
4262
                        NEON_SET_REG(T1, rd, pass * 2 + 1);
4263
                    }
4264
                }
4265
            } else {
4266
                /* Two registers and a scalar.  */
4267
                switch (op) {
4268
                case 0: /* Integer VMLA scalar */
4269
                case 1: /* Float VMLA scalar */
4270
                case 4: /* Integer VMLS scalar */
4271
                case 5: /* Floating point VMLS scalar */
4272
                case 8: /* Integer VMUL scalar */
4273
                case 9: /* Floating point VMUL scalar */
4274
                case 12: /* VQDMULH scalar */
4275
                case 13: /* VQRDMULH scalar */
4276
                    gen_neon_get_scalar(size, rm);
4277
                    gen_op_movl_T2_T0();
4278
                    for (pass = 0; pass < (u ? 4 : 2); pass++) {
4279
                        if (pass != 0)
4280
                            gen_op_movl_T0_T2();
4281
                        NEON_GET_REG(T1, rn, pass);
4282
                        if (op == 12) {
4283
                            if (size == 1) {
4284
                                gen_op_neon_qdmulh_s16();
4285
                            } else {
4286
                                gen_op_neon_qdmulh_s32();
4287
                            }
4288
                        } else if (op == 13) {
4289
                            if (size == 1) {
4290
                                gen_op_neon_qrdmulh_s16();
4291
                            } else {
4292
                                gen_op_neon_qrdmulh_s32();
4293
                            }
4294
                        } else if (op & 1) {
4295
                            gen_op_neon_mul_f32();
4296
                        } else {
4297
                            switch (size) {
4298
                            case 0: gen_op_neon_mul_u8(); break;
4299
                            case 1: gen_op_neon_mul_u16(); break;
4300
                            case 2: gen_op_mul_T0_T1(); break;
4301
                            default: return 1;
4302
                            }
4303
                        }
4304
                        if (op < 8) {
4305
                            /* Accumulate.  */
4306
                            NEON_GET_REG(T1, rd, pass);
4307
                            switch (op) {
4308
                            case 0:
4309
                                gen_neon_add(size);
4310
                                break;
4311
                            case 1:
4312
                                gen_op_neon_add_f32();
4313
                                break;
4314
                            case 4:
4315
                                switch (size) {
4316
                                case 0: gen_op_neon_rsb_u8(); break;
4317
                                case 1: gen_op_neon_rsb_u16(); break;
4318
                                case 2: gen_op_rsbl_T0_T1(); break;
4319
                                default: return 1;
4320
                                }
4321
                                break;
4322
                            case 5:
4323
                                gen_op_neon_rsb_f32();
4324
                                break;
4325
                            default:
4326
                                abort();
4327
                            }
4328
                        }
4329
                        NEON_SET_REG(T0, rd, pass);
4330
                    }
4331
                    break;
4332
                case 2: /* VMLAL sclar */
4333
                case 3: /* VQDMLAL scalar */
4334
                case 6: /* VMLSL scalar */
4335
                case 7: /* VQDMLSL scalar */
4336
                case 10: /* VMULL scalar */
4337
                case 11: /* VQDMULL scalar */
4338
                    if (rd == rn) {
4339
                        /* Save overlapping operands before they are
4340
                           clobbered.  */
4341
                        NEON_GET_REG(T0, rn, 1);
4342
                        gen_neon_movl_scratch_T0(2);
4343
                    }
4344
                    gen_neon_get_scalar(size, rm);
4345
                    gen_op_movl_T2_T0();
4346
                    for (pass = 0; pass < 2; pass++) {
4347
                        if (pass != 0) {
4348
                            gen_op_movl_T0_T2();
4349
                        }
4350
                        if (pass != 0 && rd == rn) {
4351
                            gen_neon_movl_T1_scratch(2);
4352
                        } else {
4353
                            NEON_GET_REG(T1, rn, pass);
4354
                        }
4355
                        switch ((size << 1) | u) {
4356
                        case 0: gen_op_neon_mull_s8(); break;
4357
                        case 1: gen_op_neon_mull_u8(); break;
4358
                        case 2: gen_op_neon_mull_s16(); break;
4359
                        case 3: gen_op_neon_mull_u16(); break;
4360
                        case 4: gen_op_imull_T0_T1(); break;
4361
                        case 5: gen_op_mull_T0_T1(); break;
4362
                        default: abort();
4363
                        }
4364
                        if (op == 6 || op == 7) {
4365
                            switch (size) {
4366
                            case 0: gen_op_neon_negl_u16(); break;
4367
                            case 1: gen_op_neon_negl_u32(); break;
4368
                            case 2: gen_op_neon_negl_u64(); break;
4369
                            default: abort();
4370
                            }
4371
                        }
4372
                        gen_neon_movl_scratch_T0(0);
4373
                        gen_neon_movl_scratch_T1(1);
4374
                        NEON_GET_REG(T0, rd, pass * 2);
4375
                        NEON_GET_REG(T1, rd, pass * 2 + 1);
4376
                        switch (op) {
4377
                        case 2: case 6:
4378
                            switch (size) {
4379
                            case 0: gen_op_neon_addl_u16(); break;
4380
                            case 1: gen_op_neon_addl_u32(); break;
4381
                            case 2: gen_op_neon_addl_u64(); break;
4382
                            default: abort();
4383
                            }
4384
                            break;
4385
                        case 3: case 7:
4386
                            switch (size) {
4387
                            case 1:
4388
                                gen_op_neon_addl_saturate_s32();
4389
                                gen_op_neon_addl_saturate_s32();
4390
                                break;
4391
                            case 2:
4392
                                gen_op_neon_addl_saturate_s64();
4393
                                gen_op_neon_addl_saturate_s64();
4394
                                break;
4395
                            default: abort();
4396
                            }
4397
                            break;
4398
                        case 10:
4399
                            /* no-op */
4400
                            break;
4401
                        case 11:
4402
                            switch (size) {
4403
                            case 1: gen_op_neon_addl_saturate_s32(); break;
4404
                            case 2: gen_op_neon_addl_saturate_s64(); break;
4405
                            default: abort();
4406
                            }
4407
                            break;
4408
                        default:
4409
                            abort();
4410
                        }
4411
                        NEON_SET_REG(T0, rd, pass * 2);
4412
                        NEON_SET_REG(T1, rd, pass * 2 + 1);
4413
                    }
4414
                    break;
4415
                default: /* 14 and 15 are RESERVED */
4416
                    return 1;
4417
                }
4418
            }
4419
        } else { /* size == 3 */
4420
            if (!u) {
4421
                /* Extract.  */
4422
                int reg;
4423
                imm = (insn >> 8) & 0xf;
4424
                reg = rn;
4425
                count = q ? 4 : 2;
4426
                n = imm >> 2;
4427
                NEON_GET_REG(T0, reg, n);
4428
                for (pass = 0; pass < count; pass++) {
4429
                    n++;
4430
                    if (n > count) {
4431
                        reg = rm;
4432
                        n -= count;
4433
                    }
4434
                    if (imm & 3) {
4435
                        NEON_GET_REG(T1, reg, n);
4436
                        gen_op_neon_extract((insn << 3) & 0x1f);
4437
                    }
4438
                    /* ??? This is broken if rd and rm overlap */
4439
                    NEON_SET_REG(T0, rd, pass);
4440
                    if (imm & 3) {
4441
                        gen_op_movl_T0_T1();
4442
                    } else {
4443
                        NEON_GET_REG(T0, reg, n);
4444
                    }
4445
                }
4446
            } else if ((insn & (1 << 11)) == 0) {
4447
                /* Two register misc.  */
4448
                op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
4449
                size = (insn >> 18) & 3;
4450
                switch (op) {
4451
                case 0: /* VREV64 */
4452
                    if (size == 3)
4453
                        return 1;
4454
                    for (pass = 0; pass < (q ? 2 : 1); pass++) {
4455
                        NEON_GET_REG(T0, rm, pass * 2);
4456
                        NEON_GET_REG(T1, rm, pass * 2 + 1);
4457
                        switch (size) {
4458
                        case 0: gen_op_rev_T0(); break;
4459
                        case 1: gen_swap_half(cpu_T[0]); break;
4460
                        case 2: /* no-op */ break;
4461
                        default: abort();
4462
                        }
4463
                        NEON_SET_REG(T0, rd, pass * 2 + 1);
4464
                        if (size == 2) {
4465
                            NEON_SET_REG(T1, rd, pass * 2);
4466
                        } else {
4467
                            gen_op_movl_T0_T1();
4468
                            switch (size) {
4469
                            case 0: gen_op_rev_T0(); break;
4470
                            case 1: gen_swap_half(cpu_T[0]); break;
4471
                            default: abort();
4472
                            }
4473
                            NEON_SET_REG(T0, rd, pass * 2);
4474
                        }
4475
                    }
4476
                    break;
4477
                case 4: case 5: /* VPADDL */
4478
                case 12: case 13: /* VPADAL */
4479
                    if (size < 2)
4480
                        goto elementwise;
4481
                    if (size == 3)
4482
                        return 1;
4483
                    for (pass = 0; pass < (q ? 2 : 1); pass++) {
4484
                        NEON_GET_REG(T0, rm, pass * 2);
4485
                        NEON_GET_REG(T1, rm, pass * 2 + 1);
4486
                        if (op & 1)
4487
                            gen_op_neon_paddl_u32();
4488
                        else
4489
                            gen_op_neon_paddl_s32();
4490
                        if (op >= 12) {
4491
                            /* Accumulate.  */
4492
                            gen_neon_movl_scratch_T0(0);
4493
                            gen_neon_movl_scratch_T1(1);
4494

    
4495
                            NEON_GET_REG(T0, rd, pass * 2);
4496
                            NEON_GET_REG(T1, rd, pass * 2 + 1);
4497
                            gen_op_neon_addl_u64();
4498
                        }
4499
                        NEON_SET_REG(T0, rd, pass * 2);
4500
                        NEON_SET_REG(T1, rd, pass * 2 + 1);
4501
                    }
4502
                    break;
4503
                case 33: /* VTRN */
4504
                    if (size == 2) {
4505
                        for (n = 0; n < (q ? 4 : 2); n += 2) {
4506
                            NEON_GET_REG(T0, rm, n);
4507
                            NEON_GET_REG(T1, rd, n + 1);
4508
                            NEON_SET_REG(T1, rm, n);
4509
                            NEON_SET_REG(T0, rd, n + 1);
4510
                        }
4511
                    } else {
4512
                        goto elementwise;
4513
                    }
4514
                    break;
4515
                case 34: /* VUZP */
4516
                    /* Reg  Before       After
4517
                       Rd   A3 A2 A1 A0  B2 B0 A2 A0
4518
                       Rm   B3 B2 B1 B0  B3 B1 A3 A1
4519
                     */
4520
                    if (size == 3)
4521
                        return 1;
4522
                    gen_neon_unzip(rd, q, 0, size);
4523
                    gen_neon_unzip(rm, q, 4, size);
4524
                    if (q) {
4525
                        static int unzip_order_q[8] =
4526
                            {0, 2, 4, 6, 1, 3, 5, 7};
4527
                        for (n = 0; n < 8; n++) {
4528
                            int reg = (n < 4) ? rd : rm;
4529
                            gen_neon_movl_T0_scratch(unzip_order_q[n]);
4530
                            NEON_SET_REG(T0, reg, n % 4);
4531
                        }
4532
                    } else {
4533
                        static int unzip_order[4] =
4534
                            {0, 4, 1, 5};
4535
                        for (n = 0; n < 4; n++) {
4536
                            int reg = (n < 2) ? rd : rm;
4537
                            gen_neon_movl_T0_scratch(unzip_order[n]);
4538
                            NEON_SET_REG(T0, reg, n % 2);
4539
                        }
4540
                    }
4541
                    break;
4542
                case 35: /* VZIP */
4543
                    /* Reg  Before       After
4544
                       Rd   A3 A2 A1 A0  B1 A1 B0 A0
4545
                       Rm   B3 B2 B1 B0  B3 A3 B2 A2
4546
                     */
4547
                    if (size == 3)
4548
                        return 1;
4549
                    count = (q ? 4 : 2);
4550
                    for (n = 0; n < count; n++) {
4551
                        NEON_GET_REG(T0, rd, n);
4552
                        NEON_GET_REG(T1, rd, n);
4553
                        switch (size) {
4554
                        case 0: gen_op_neon_zip_u8(); break;
4555
                        case 1: gen_op_neon_zip_u16(); break;
4556
                        case 2: /* no-op */; break;
4557
                        default: abort();
4558
                        }
4559
                        gen_neon_movl_scratch_T0(n * 2);
4560
                        gen_neon_movl_scratch_T1(n * 2 + 1);
4561
                    }
4562
                    for (n = 0; n < count * 2; n++) {
4563
                        int reg = (n < count) ? rd : rm;
4564
                        gen_neon_movl_T0_scratch(n);
4565
                        NEON_SET_REG(T0, reg, n % count);
4566
                    }
4567
                    break;
4568
                case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
4569
                    for (pass = 0; pass < 2; pass++) {
4570
                        if (rd == rm + 1) {
4571
                            n = 1 - pass;
4572
                        } else {
4573
                            n = pass;
4574
                        }
4575
                        NEON_GET_REG(T0, rm, n * 2);
4576
                        NEON_GET_REG(T1, rm, n * 2 + 1);
4577
                        if (op == 36 && q == 0) {
4578
                            switch (size) {
4579
                            case 0: gen_op_neon_narrow_u8(); break;
4580
                            case 1: gen_op_neon_narrow_u16(); break;
4581
                            case 2: /* no-op */ break;
4582
                            default: return 1;
4583
                            }
4584
                        } else if (q) {
4585
                            switch (size) {
4586
                            case 0: gen_op_neon_narrow_sat_u8(); break;
4587
                            case 1: gen_op_neon_narrow_sat_u16(); break;
4588
                            case 2: gen_op_neon_narrow_sat_u32(); break;
4589
                            default: return 1;
4590
                            }
4591
                        } else {
4592
                            switch (size) {
4593
                            case 0: gen_op_neon_narrow_sat_s8(); break;
4594
                            case 1: gen_op_neon_narrow_sat_s16(); break;
4595
                            case 2: gen_op_neon_narrow_sat_s32(); break;
4596
                            default: return 1;
4597
                            }
4598
                        }
4599
                        NEON_SET_REG(T0, rd, n);
4600
                    }
4601
                    break;
4602
                case 38: /* VSHLL */
4603
                    if (q)
4604
                        return 1;
4605
                    if (rm == rd) {
4606
                        NEON_GET_REG(T2, rm, 1);
4607
                    }
4608
                    for (pass = 0; pass < 2; pass++) {
4609
                        if (pass == 1 && rm == rd) {
4610
                            gen_op_movl_T0_T2();
4611
                        } else {
4612
                            NEON_GET_REG(T0, rm, pass);
4613
                        }
4614
                        switch (size) {
4615
                        case 0: gen_op_neon_widen_high_u8(); break;
4616
                        case 1: gen_op_neon_widen_high_u16(); break;
4617
                        case 2:
4618
                            gen_op_movl_T1_T0();
4619
                            gen_op_movl_T0_im(0);
4620
                            break;
4621
                        default: return 1;
4622
                        }
4623
                        NEON_SET_REG(T0, rd, pass * 2);
4624
                        NEON_SET_REG(T1, rd, pass * 2 + 1);
4625
                    }
4626
                    break;
4627
                default:
4628
                elementwise:
4629
                    for (pass = 0; pass < (q ? 4 : 2); pass++) {
4630
                        if (op == 30 || op == 31 || op >= 58) {
4631
                            gen_op_vfp_getreg_F0s(neon_reg_offset(rm, pass));
4632
                        } else {
4633
                            NEON_GET_REG(T0, rm, pass);
4634
                        }
4635
                        switch (op) {
4636
                        case 1: /* VREV32 */
4637
                            switch (size) {
4638
                            case 0: gen_op_rev_T0(); break;
4639
                            case 1: gen_swap_half(cpu_T[0]); break;
4640
                            default: return 1;
4641
                            }
4642
                            break;
4643
                        case 2: /* VREV16 */
4644
                            if (size != 0)
4645
                                return 1;
4646
                            gen_rev16(cpu_T[0]);
4647
                            break;
4648
                        case 4: case 5: /* VPADDL */
4649
                        case 12: case 13: /* VPADAL */
4650
                            switch ((size << 1) | (op & 1)) {
4651
                            case 0: gen_op_neon_paddl_s8(); break;
4652
                            case 1: gen_op_neon_paddl_u8(); break;
4653
                            case 2: gen_op_neon_paddl_s16(); break;
4654
                            case 3: gen_op_neon_paddl_u16(); break;
4655
                            default: abort();
4656
                            }
4657
                            if (op >= 12) {
4658
                                /* Accumulate */
4659
                                NEON_GET_REG(T1, rd, pass);
4660
                                switch (size) {
4661
                                case 0: gen_op_neon_add_u16(); break;
4662
                                case 1: gen_op_addl_T0_T1(); break;
4663
                                default: abort();
4664
                                }
4665
                            }
4666
                            break;
4667
                        case 8: /* CLS */
4668
                            switch (size) {
4669
                            case 0: gen_op_neon_cls_s8(); break;
4670
                            case 1: gen_op_neon_cls_s16(); break;
4671
                            case 2: gen_op_neon_cls_s32(); break;
4672
                            default: return 1;
4673
                            }
4674
                            break;
4675
                        case 9: /* CLZ */
4676
                            switch (size) {
4677
                            case 0: gen_op_neon_clz_u8(); break;
4678
                            case 1: gen_op_neon_clz_u16(); break;
4679
                            case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
4680
                            default: return 1;
4681
                            }
4682
                            break;
4683
                        case 10: /* CNT */
4684
                            if (size != 0)
4685
                                return 1;
4686
                            gen_op_neon_cnt_u8();
4687
                            break;
4688
                        case 11: /* VNOT */
4689
                            if (size != 0)
4690
                                return 1;
4691
                            gen_op_notl_T0();
4692
                            break;
4693
                        case 14: /* VQABS */
4694
                            switch (size) {
4695
                            case 0: gen_op_neon_qabs_s8(); break;
4696
                            case 1: gen_op_neon_qabs_s16(); break;
4697
                            case 2: gen_op_neon_qabs_s32(); break;
4698
                            default: return 1;
4699
                            }
4700
                            break;
4701
                        case 15: /* VQNEG */
4702
                            switch (size) {
4703
                            case 0: gen_op_neon_qneg_s8(); break;
4704
                            case 1: gen_op_neon_qneg_s16(); break;
4705
                            case 2: gen_op_neon_qneg_s32(); break;
4706
                            default: return 1;
4707
                            }
4708
                            break;
4709
                        case 16: case 19: /* VCGT #0, VCLE #0 */
4710
                            gen_op_movl_T1_im(0);
4711
                            switch(size) {
4712
                            case 0: gen_op_neon_cgt_s8(); break;
4713
                            case 1: gen_op_neon_cgt_s16(); break;
4714
                            case 2: gen_op_neon_cgt_s32(); break;
4715
                            default: return 1;
4716
                            }
4717
                            if (op == 19)
4718
                                gen_op_notl_T0();
4719
                            break;
4720
                        case 17: case 20: /* VCGE #0, VCLT #0 */
4721
                            gen_op_movl_T1_im(0);
4722
                            switch(size) {
4723
                            case 0: gen_op_neon_cge_s8(); break;
4724
                            case 1: gen_op_neon_cge_s16(); break;
4725
                            case 2: gen_op_neon_cge_s32(); break;
4726
                            default: return 1;
4727
                            }
4728
                            if (op == 20)
4729
                                gen_op_notl_T0();
4730
                            break;
4731
                        case 18: /* VCEQ #0 */
4732
                            gen_op_movl_T1_im(0);
4733
                            switch(size) {
4734
                            case 0: gen_op_neon_ceq_u8(); break;
4735
                            case 1: gen_op_neon_ceq_u16(); break;
4736
                            case 2: gen_op_neon_ceq_u32(); break;
4737
                            default: return 1;
4738
                            }
4739
                            break;
4740
                        case 22: /* VABS */
4741
                            switch(size) {
4742
                            case 0: gen_op_neon_abs_s8(); break;
4743
                            case 1: gen_op_neon_abs_s16(); break;
4744
                            case 2: gen_op_neon_abs_s32(); break;
4745
                            default: return 1;
4746
                            }
4747
                            break;
4748
                        case 23: /* VNEG */
4749
                            gen_op_movl_T1_im(0);
4750
                            switch(size) {
4751
                            case 0: gen_op_neon_rsb_u8(); break;
4752
                            case 1: gen_op_neon_rsb_u16(); break;
4753
                            case 2: gen_op_rsbl_T0_T1(); break;
4754
                            default: return 1;
4755
                            }
4756
                            break;
4757
                        case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
4758
                            gen_op_movl_T1_im(0);
4759
                            gen_op_neon_cgt_f32();
4760
                            if (op == 27)
4761
                                gen_op_notl_T0();
4762
                            break;
4763
                        case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
4764
                            gen_op_movl_T1_im(0);
4765
                            gen_op_neon_cge_f32();
4766
                            if (op == 28)
4767
                                gen_op_notl_T0();
4768
                            break;
4769
                        case 26: /* Float VCEQ #0 */
4770
                            gen_op_movl_T1_im(0);
4771
                            gen_op_neon_ceq_f32();
4772
                            break;
4773
                        case 30: /* Float VABS */
4774
                            gen_op_vfp_abss();
4775
                            break;
4776
                        case 31: /* Float VNEG */
4777
                            gen_op_vfp_negs();
4778
                            break;
4779
                        case 32: /* VSWP */
4780
                            NEON_GET_REG(T1, rd, pass);
4781
                            NEON_SET_REG(T1, rm, pass);
4782
                            break;
4783
                        case 33: /* VTRN */
4784
                            NEON_GET_REG(T1, rd, pass);
4785
                            switch (size) {
4786
                            case 0: gen_op_neon_trn_u8(); break;
4787
                            case 1: gen_op_neon_trn_u16(); break;
4788
                            case 2: abort();
4789
                            default: return 1;
4790
                            }
4791
                            NEON_SET_REG(T1, rm, pass);
4792
                            break;
4793
                        case 56: /* Integer VRECPE */
4794
                            gen_op_neon_recpe_u32();
4795
                            break;
4796
                        case 57: /* Integer VRSQRTE */
4797
                            gen_op_neon_rsqrte_u32();
4798
                            break;
4799
                        case 58: /* Float VRECPE */
4800
                            gen_op_neon_recpe_f32();
4801
                            break;
4802
                        case 59: /* Float VRSQRTE */
4803
                            gen_op_neon_rsqrte_f32();
4804
                            break;
4805
                        case 60: /* VCVT.F32.S32 */
4806
                            gen_op_vfp_tosizs();
4807
                            break;
4808
                        case 61: /* VCVT.F32.U32 */
4809
                            gen_op_vfp_touizs();
4810
                            break;
4811
                        case 62: /* VCVT.S32.F32 */
4812
                            gen_op_vfp_sitos();
4813
                            break;
4814
                        case 63: /* VCVT.U32.F32 */
4815
                            gen_op_vfp_uitos();
4816
                            break;
4817
                        default:
4818
                            /* Reserved: 21, 29, 39-56 */
4819
                            return 1;
4820
                        }
4821
                        if (op == 30 || op == 31 || op >= 58) {
4822
                            gen_op_vfp_setreg_F0s(neon_reg_offset(rm, pass));
4823
                        } else {
4824
                            NEON_SET_REG(T0, rd, pass);
4825
                        }
4826
                    }
4827
                    break;
4828
                }
4829
            } else if ((insn & (1 << 10)) == 0) {
4830
                /* VTBL, VTBX.  */
4831
                n = (insn >> 5) & 0x18;
4832
                NEON_GET_REG(T1, rm, 0);
4833
                if (insn & (1 << 6)) {
4834
                    NEON_GET_REG(T0, rd, 0);
4835
                } else {
4836
                    gen_op_movl_T0_im(0);
4837
                }
4838
                gen_op_neon_tbl(rn, n);
4839
                gen_op_movl_T2_T0();
4840
                NEON_GET_REG(T1, rm, 1);
4841
                if (insn & (1 << 6)) {
4842
                    NEON_GET_REG(T0, rd, 0);
4843
                } else {
4844
                    gen_op_movl_T0_im(0);
4845
                }
4846
                gen_op_neon_tbl(rn, n);
4847
                NEON_SET_REG(T2, rd, 0);
4848
                NEON_SET_REG(T0, rd, 1);
4849
            } else if ((insn & 0x380) == 0) {
4850
                /* VDUP */
4851
                if (insn & (1 << 19)) {
4852
                    NEON_SET_REG(T0, rm, 1);
4853
                } else {
4854
                    NEON_SET_REG(T0, rm, 0);
4855
                }
4856
                if (insn & (1 << 16)) {
4857
                    gen_op_neon_dup_u8(((insn >> 17) & 3) * 8);
4858
                } else if (insn & (1 << 17)) {
4859
                    if ((insn >> 18) & 1)
4860
                        gen_op_neon_dup_high16();
4861
                    else
4862
                        gen_op_neon_dup_low16();
4863
                }
4864
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
4865
                    NEON_SET_REG(T0, rd, pass);
4866
                }
4867
            } else {
4868
                return 1;
4869
            }
4870
        }
4871
    }
4872
    return 0;
4873
}
4874

    
4875
/* Decode a generic coprocessor instruction (CDP/LDC/STC/MCR/MRC and
   friends) and dispatch to the decoder for the coprocessor selected by
   bits [11:8] of the instruction.  Returns 0 if the instruction was
   translated, nonzero to signal an illegal/undefined instruction.  */
static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int cpnum = (insn >> 8) & 0xf;

    /* On XScale, c15_cpar is a per-coprocessor access-enable mask for
       cp0..cp13: a clear bit denies access, so the instruction traps as
       undefined.  */
    if (arm_feature(env, ARM_FEATURE_XSCALE)
            && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
        return 1;

    if (cpnum == 0 || cpnum == 1) {
        /* cp0/cp1 carry the iwMMXt extension, or the XScale DSP
           instructions on non-iwMMXt XScale cores.  */
        if (arm_feature(env, ARM_FEATURE_IWMMXT))
            return disas_iwmmxt_insn(env, s, insn);
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            return disas_dsp_insn(env, s, insn);
        return 1;
    }
    if (cpnum == 10 || cpnum == 11) {
        /* cp10/cp11: VFP.  */
        return disas_vfp_insn (env, s, insn);
    }
    if (cpnum == 15) {
        /* cp15: system control.  */
        return disas_cp15_insn (env, s, insn);
    }
    /* Unknown coprocessor.  See if the board has hooked it.  */
    return disas_cp_insn (env, s, insn);
}
4903

    
4904
static void disas_arm_insn(CPUState * env, DisasContext *s)
4905
{
4906
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
4907
    TCGv tmp;
4908
    TCGv tmp2;
4909

    
4910
    insn = ldl_code(s->pc);
4911
    s->pc += 4;
4912

    
4913
    /* M variants do not implement ARM mode.  */
4914
    if (IS_M(env))
4915
        goto illegal_op;
4916
    cond = insn >> 28;
4917
    if (cond == 0xf){
4918
        /* Unconditional instructions.  */
4919
        if (((insn >> 25) & 7) == 1) {
4920
            /* NEON Data processing.  */
4921
            if (!arm_feature(env, ARM_FEATURE_NEON))
4922
                goto illegal_op;
4923

    
4924
            if (disas_neon_data_insn(env, s, insn))
4925
                goto illegal_op;
4926
            return;
4927
        }
4928
        if ((insn & 0x0f100000) == 0x04000000) {
4929
            /* NEON load/store.  */
4930
            if (!arm_feature(env, ARM_FEATURE_NEON))
4931
                goto illegal_op;
4932

    
4933
            if (disas_neon_ls_insn(env, s, insn))
4934
                goto illegal_op;
4935
            return;
4936
        }
4937
        if ((insn & 0x0d70f000) == 0x0550f000)
4938
            return; /* PLD */
4939
        else if ((insn & 0x0ffffdff) == 0x01010000) {
4940
            ARCH(6);
4941
            /* setend */
4942
            if (insn & (1 << 9)) {
4943
                /* BE8 mode not implemented.  */
4944
                goto illegal_op;
4945
            }
4946
            return;
4947
        } else if ((insn & 0x0fffff00) == 0x057ff000) {
4948
            switch ((insn >> 4) & 0xf) {
4949
            case 1: /* clrex */
4950
                ARCH(6K);
4951
                gen_op_clrex();
4952
                return;
4953
            case 4: /* dsb */
4954
            case 5: /* dmb */
4955
            case 6: /* isb */
4956
                ARCH(7);
4957
                /* We don't emulate caches so these are a no-op.  */
4958
                return;
4959
            default:
4960
                goto illegal_op;
4961
            }
4962
        } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
4963
            /* srs */
4964
            uint32_t offset;
4965
            if (IS_USER(s))
4966
                goto illegal_op;
4967
            ARCH(6);
4968
            op1 = (insn & 0x1f);
4969
            if (op1 == (env->uncached_cpsr & CPSR_M)) {
4970
                gen_movl_T1_reg(s, 13);
4971
            } else {
4972
                gen_op_movl_T1_r13_banked(op1);
4973
            }
4974
            i = (insn >> 23) & 3;
4975
            switch (i) {
4976
            case 0: offset = -4; break; /* DA */
4977
            case 1: offset = -8; break; /* DB */
4978
            case 2: offset = 0; break; /* IA */
4979
            case 3: offset = 4; break; /* IB */
4980
            default: abort();
4981
            }
4982
            if (offset)
4983
                gen_op_addl_T1_im(offset);
4984
            gen_movl_T0_reg(s, 14);
4985
            gen_ldst(stl, s);
4986
            gen_op_movl_T0_cpsr();
4987
            gen_op_addl_T1_im(4);
4988
            gen_ldst(stl, s);
4989
            if (insn & (1 << 21)) {
4990
                /* Base writeback.  */
4991
                switch (i) {
4992
                case 0: offset = -8; break;
4993
                case 1: offset = -4; break;
4994
                case 2: offset = 4; break;
4995
                case 3: offset = 0; break;
4996
                default: abort();
4997
                }
4998
                if (offset)
4999
                    gen_op_addl_T1_im(offset);
5000
                if (op1 == (env->uncached_cpsr & CPSR_M)) {
5001
                    gen_movl_reg_T1(s, 13);
5002
                } else {
5003
                    gen_op_movl_r13_T1_banked(op1);
5004
                }
5005
            }
5006
        } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5007
            /* rfe */
5008
            uint32_t offset;
5009
            if (IS_USER(s))
5010
                goto illegal_op;
5011
            ARCH(6);
5012
            rn = (insn >> 16) & 0xf;
5013
            gen_movl_T1_reg(s, rn);
5014
            i = (insn >> 23) & 3;
5015
            switch (i) {
5016
            case 0: offset = 0; break; /* DA */
5017
            case 1: offset = -4; break; /* DB */
5018
            case 2: offset = 4; break; /* IA */
5019
            case 3: offset = 8; break; /* IB */
5020
            default: abort();
5021
            }
5022
            if (offset)
5023
                gen_op_addl_T1_im(offset);
5024
            /* Load CPSR into T2 and PC into T0.  */
5025
            gen_ldst(ldl, s);
5026
            gen_op_movl_T2_T0();
5027
            gen_op_addl_T1_im(-4);
5028
            gen_ldst(ldl, s);
5029
            if (insn & (1 << 21)) {
5030
                /* Base writeback.  */
5031
                switch (i) {
5032
                case 0: offset = -4; break;
5033
                case 1: offset = 0; break;
5034
                case 2: offset = 8; break;
5035
                case 3: offset = 4; break;
5036
                default: abort();
5037
                }
5038
                if (offset)
5039
                    gen_op_addl_T1_im(offset);
5040
                gen_movl_reg_T1(s, rn);
5041
            }
5042
            gen_rfe(s);
5043
        } else if ((insn & 0x0e000000) == 0x0a000000) {
5044
            /* branch link and change to thumb (blx <offset>) */
5045
            int32_t offset;
5046

    
5047
            val = (uint32_t)s->pc;
5048
            gen_op_movl_T0_im(val);
5049
            gen_movl_reg_T0(s, 14);
5050
            /* Sign-extend the 24-bit offset */
5051
            offset = (((int32_t)insn) << 8) >> 8;
5052
            /* offset * 4 + bit24 * 2 + (thumb bit) */
5053
            val += (offset << 2) | ((insn >> 23) & 2) | 1;
5054
            /* pipeline offset */
5055
            val += 4;
5056
            gen_op_movl_T0_im(val);
5057
            gen_bx(s);
5058
            return;
5059
        } else if ((insn & 0x0e000f00) == 0x0c000100) {
5060
            if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5061
                /* iWMMXt register transfer.  */
5062
                if (env->cp15.c15_cpar & (1 << 1))
5063
                    if (!disas_iwmmxt_insn(env, s, insn))
5064
                        return;
5065
            }
5066
        } else if ((insn & 0x0fe00000) == 0x0c400000) {
5067
            /* Coprocessor double register transfer.  */
5068
        } else if ((insn & 0x0f000010) == 0x0e000010) {
5069
            /* Additional coprocessor register transfer.  */
5070
        } else if ((insn & 0x0ff10010) == 0x01000000) {
5071
            uint32_t mask;
5072
            uint32_t val;
5073
            /* cps (privileged) */
5074
            if (IS_USER(s))
5075
                return;
5076
            mask = val = 0;
5077
            if (insn & (1 << 19)) {
5078
                if (insn & (1 << 8))
5079
                    mask |= CPSR_A;
5080
                if (insn & (1 << 7))
5081
                    mask |= CPSR_I;
5082
                if (insn & (1 << 6))
5083
                    mask |= CPSR_F;
5084
                if (insn & (1 << 18))
5085
                    val |= mask;
5086
            }
5087
            if (insn & (1 << 14)) {
5088
                mask |= CPSR_M;
5089
                val |= (insn & 0x1f);
5090
            }
5091
            if (mask) {
5092
                gen_op_movl_T0_im(val);
5093
                gen_set_psr_T0(s, mask, 0);
5094
            }
5095
            return;
5096
        }
5097
        goto illegal_op;
5098
    }
5099
    if (cond != 0xe) {
5100
        /* if not always execute, we generate a conditional jump to
5101
           next instruction */
5102
        s->condlabel = gen_new_label();
5103
        gen_test_cc[cond ^ 1](s->condlabel);
5104
        s->condjmp = 1;
5105
    }
5106
    if ((insn & 0x0f900000) == 0x03000000) {
5107
        if ((insn & (1 << 21)) == 0) {
5108
            ARCH(6T2);
5109
            rd = (insn >> 12) & 0xf;
5110
            val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5111
            if ((insn & (1 << 22)) == 0) {
5112
                /* MOVW */
5113
                gen_op_movl_T0_im(val);
5114
            } else {
5115
                /* MOVT */
5116
                gen_movl_T0_reg(s, rd);
5117
                gen_op_movl_T1_im(0xffff);
5118
                gen_op_andl_T0_T1();
5119
                gen_op_movl_T1_im(val << 16);
5120
                gen_op_orl_T0_T1();
5121
            }
5122
            gen_movl_reg_T0(s, rd);
5123
        } else {
5124
            if (((insn >> 12) & 0xf) != 0xf)
5125
                goto illegal_op;
5126
            if (((insn >> 16) & 0xf) == 0) {
5127
                gen_nop_hint(s, insn & 0xff);
5128
            } else {
5129
                /* CPSR = immediate */
5130
                val = insn & 0xff;
5131
                shift = ((insn >> 8) & 0xf) * 2;
5132
                if (shift)
5133
                    val = (val >> shift) | (val << (32 - shift));
5134
                gen_op_movl_T0_im(val);
5135
                i = ((insn & (1 << 22)) != 0);
5136
                if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5137
                    goto illegal_op;
5138
            }
5139
        }
5140
    } else if ((insn & 0x0f900000) == 0x01000000
5141
               && (insn & 0x00000090) != 0x00000090) {
5142
        /* miscellaneous instructions */
5143
        op1 = (insn >> 21) & 3;
5144
        sh = (insn >> 4) & 0xf;
5145
        rm = insn & 0xf;
5146
        switch (sh) {
5147
        case 0x0: /* move program status register */
5148
            if (op1 & 1) {
5149
                /* PSR = reg */
5150
                gen_movl_T0_reg(s, rm);
5151
                i = ((op1 & 2) != 0);
5152
                if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5153
                    goto illegal_op;
5154
            } else {
5155
                /* reg = PSR */
5156
                rd = (insn >> 12) & 0xf;
5157
                if (op1 & 2) {
5158
                    if (IS_USER(s))
5159
                        goto illegal_op;
5160
                    gen_op_movl_T0_spsr();
5161
                } else {
5162
                    gen_op_movl_T0_cpsr();
5163
                }
5164
                gen_movl_reg_T0(s, rd);
5165
            }
5166
            break;
5167
        case 0x1:
5168
            if (op1 == 1) {
5169
                /* branch/exchange thumb (bx).  */
5170
                gen_movl_T0_reg(s, rm);
5171
                gen_bx(s);
5172
            } else if (op1 == 3) {
5173
                /* clz */
5174
                rd = (insn >> 12) & 0xf;
5175
                tmp = load_reg(s, rm);
5176
                gen_helper_clz(tmp, tmp);
5177
                store_reg(s, rd, tmp);
5178
            } else {
5179
                goto illegal_op;
5180
            }
5181
            break;
5182
        case 0x2:
5183
            if (op1 == 1) {
5184
                ARCH(5J); /* bxj */
5185
                /* Trivial implementation equivalent to bx.  */
5186
                gen_movl_T0_reg(s, rm);
5187
                gen_bx(s);
5188
            } else {
5189
                goto illegal_op;
5190
            }
5191
            break;
5192
        case 0x3:
5193
            if (op1 != 1)
5194
              goto illegal_op;
5195

    
5196
            /* branch link/exchange thumb (blx) */
5197
            val = (uint32_t)s->pc;
5198
            gen_op_movl_T1_im(val);
5199
            gen_movl_T0_reg(s, rm);
5200
            gen_movl_reg_T1(s, 14);
5201
            gen_bx(s);
5202
            break;
5203
        case 0x5: /* saturating add/subtract */
5204
            rd = (insn >> 12) & 0xf;
5205
            rn = (insn >> 16) & 0xf;
5206
            gen_movl_T0_reg(s, rm);
5207
            gen_movl_T1_reg(s, rn);
5208
            if (op1 & 2)
5209
                gen_helper_double_saturate(cpu_T[1], cpu_T[1]);
5210
            if (op1 & 1)
5211
                gen_op_subl_T0_T1_saturate();
5212
            else
5213
                gen_op_addl_T0_T1_saturate();
5214
            gen_movl_reg_T0(s, rd);
5215
            break;
5216
        case 7: /* bkpt */
5217
            gen_set_condexec(s);
5218
            gen_op_movl_T0_im((long)s->pc - 4);
5219
            gen_set_pc_T0();
5220
            gen_op_bkpt();
5221
            s->is_jmp = DISAS_JUMP;
5222
            break;
5223
        case 0x8: /* signed multiply */
5224
        case 0xa:
5225
        case 0xc:
5226
        case 0xe:
5227
            rs = (insn >> 8) & 0xf;
5228
            rn = (insn >> 12) & 0xf;
5229
            rd = (insn >> 16) & 0xf;
5230
            if (op1 == 1) {
5231
                /* (32 * 16) >> 16 */
5232
                gen_movl_T0_reg(s, rm);
5233
                gen_movl_T1_reg(s, rs);
5234
                if (sh & 4)
5235
                    gen_op_sarl_T1_im(16);
5236
                else
5237
                    gen_sxth(cpu_T[1]);
5238
                gen_op_imulw_T0_T1();
5239
                if ((sh & 2) == 0) {
5240
                    gen_movl_T1_reg(s, rn);
5241
                    gen_op_addl_T0_T1_setq();
5242
                }
5243
                gen_movl_reg_T0(s, rd);
5244
            } else {
5245
                /* 16 * 16 */
5246
                gen_movl_T0_reg(s, rm);
5247
                gen_movl_T1_reg(s, rs);
5248
                gen_mulxy(sh & 2, sh & 4);
5249
                if (op1 == 2) {
5250
                    gen_op_signbit_T1_T0();
5251
                    gen_op_addq_T0_T1(rn, rd);
5252
                    gen_movl_reg_T0(s, rn);
5253
                    gen_movl_reg_T1(s, rd);
5254
                } else {
5255
                    if (op1 == 0) {
5256
                        gen_movl_T1_reg(s, rn);
5257
                        gen_op_addl_T0_T1_setq();
5258
                    }
5259
                    gen_movl_reg_T0(s, rd);
5260
                }
5261
            }
5262
            break;
5263
        default:
5264
            goto illegal_op;
5265
        }
5266
    } else if (((insn & 0x0e000000) == 0 &&
5267
                (insn & 0x00000090) != 0x90) ||
5268
               ((insn & 0x0e000000) == (1 << 25))) {
5269
        int set_cc, logic_cc, shiftop;
5270

    
5271
        op1 = (insn >> 21) & 0xf;
5272
        set_cc = (insn >> 20) & 1;
5273
        logic_cc = table_logic_cc[op1] & set_cc;
5274

    
5275
        /* data processing instruction */
5276
        if (insn & (1 << 25)) {
5277
            /* immediate operand */
5278
            val = insn & 0xff;
5279
            shift = ((insn >> 8) & 0xf) * 2;
5280
            if (shift)
5281
                val = (val >> shift) | (val << (32 - shift));
5282
            gen_op_movl_T1_im(val);
5283
            if (logic_cc && shift)
5284
                gen_set_CF_bit31(cpu_T[1]);
5285
        } else {
5286
            /* register */
5287
            rm = (insn) & 0xf;
5288
            gen_movl_T1_reg(s, rm);
5289
            shiftop = (insn >> 5) & 3;
5290
            if (!(insn & (1 << 4))) {
5291
                shift = (insn >> 7) & 0x1f;
5292
                gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
5293
            } else {
5294
                rs = (insn >> 8) & 0xf;
5295
                gen_movl_T0_reg(s, rs);
5296
                if (logic_cc) {
5297
                    gen_shift_T1_T0_cc[shiftop]();
5298
                } else {
5299
                    gen_shift_T1_T0[shiftop]();
5300
                }
5301
            }
5302
        }
5303
        if (op1 != 0x0f && op1 != 0x0d) {
5304
            rn = (insn >> 16) & 0xf;
5305
            gen_movl_T0_reg(s, rn);
5306
        }
5307
        rd = (insn >> 12) & 0xf;
5308
        switch(op1) {
5309
        case 0x00:
5310
            gen_op_andl_T0_T1();
5311
            gen_movl_reg_T0(s, rd);
5312
            if (logic_cc)
5313
                gen_op_logic_T0_cc();
5314
            break;
5315
        case 0x01:
5316
            gen_op_xorl_T0_T1();
5317
            gen_movl_reg_T0(s, rd);
5318
            if (logic_cc)
5319
                gen_op_logic_T0_cc();
5320
            break;
5321
        case 0x02:
5322
            if (set_cc && rd == 15) {
5323
                /* SUBS r15, ... is used for exception return.  */
5324
                if (IS_USER(s))
5325
                    goto illegal_op;
5326
                gen_op_subl_T0_T1_cc();
5327
                gen_exception_return(s);
5328
            } else {
5329
                if (set_cc)
5330
                    gen_op_subl_T0_T1_cc();
5331
                else
5332
                    gen_op_subl_T0_T1();
5333
                gen_movl_reg_T0(s, rd);
5334
            }
5335
            break;
5336
        case 0x03:
5337
            if (set_cc)
5338
                gen_op_rsbl_T0_T1_cc();
5339
            else
5340
                gen_op_rsbl_T0_T1();
5341
            gen_movl_reg_T0(s, rd);
5342
            break;
5343
        case 0x04:
5344
            if (set_cc)
5345
                gen_op_addl_T0_T1_cc();
5346
            else
5347
                gen_op_addl_T0_T1();
5348
            gen_movl_reg_T0(s, rd);
5349
            break;
5350
        case 0x05:
5351
            if (set_cc)
5352
                gen_op_adcl_T0_T1_cc();
5353
            else
5354
                gen_adc_T0_T1();
5355
            gen_movl_reg_T0(s, rd);
5356
            break;
5357
        case 0x06:
5358
            if (set_cc)
5359
                gen_op_sbcl_T0_T1_cc();
5360
            else
5361
                gen_sbc_T0_T1();
5362
            gen_movl_reg_T0(s, rd);
5363
            break;
5364
        case 0x07:
5365
            if (set_cc)
5366
                gen_op_rscl_T0_T1_cc();
5367
            else
5368
                gen_rsc_T0_T1();
5369
            gen_movl_reg_T0(s, rd);
5370
            break;
5371
        case 0x08:
5372
            if (set_cc) {
5373
                gen_op_andl_T0_T1();
5374
                gen_op_logic_T0_cc();
5375
            }
5376
            break;
5377
        case 0x09:
5378
            if (set_cc) {
5379
                gen_op_xorl_T0_T1();
5380
                gen_op_logic_T0_cc();
5381
            }
5382
            break;
5383
        case 0x0a:
5384
            if (set_cc) {
5385
                gen_op_subl_T0_T1_cc();
5386
            }
5387
            break;
5388
        case 0x0b:
5389
            if (set_cc) {
5390
                gen_op_addl_T0_T1_cc();
5391
            }
5392
            break;
5393
        case 0x0c:
5394
            gen_op_orl_T0_T1();
5395
            gen_movl_reg_T0(s, rd);
5396
            if (logic_cc)
5397
                gen_op_logic_T0_cc();
5398
            break;
5399
        case 0x0d:
5400
            if (logic_cc && rd == 15) {
5401
                /* MOVS r15, ... is used for exception return.  */
5402
                if (IS_USER(s))
5403
                    goto illegal_op;
5404
                gen_op_movl_T0_T1();
5405
                gen_exception_return(s);
5406
            } else {
5407
                gen_movl_reg_T1(s, rd);
5408
                if (logic_cc)
5409
                    gen_op_logic_T1_cc();
5410
            }
5411
            break;
5412
        case 0x0e:
5413
            gen_op_bicl_T0_T1();
5414
            gen_movl_reg_T0(s, rd);
5415
            if (logic_cc)
5416
                gen_op_logic_T0_cc();
5417
            break;
5418
        default:
5419
        case 0x0f:
5420
            gen_op_notl_T1();
5421
            gen_movl_reg_T1(s, rd);
5422
            if (logic_cc)
5423
                gen_op_logic_T1_cc();
5424
            break;
5425
        }
5426
    } else {
5427
        /* other instructions */
5428
        op1 = (insn >> 24) & 0xf;
5429
        switch(op1) {
5430
        case 0x0:
5431
        case 0x1:
5432
            /* multiplies, extra load/stores */
5433
            sh = (insn >> 5) & 3;
5434
            if (sh == 0) {
5435
                if (op1 == 0x0) {
5436
                    rd = (insn >> 16) & 0xf;
5437
                    rn = (insn >> 12) & 0xf;
5438
                    rs = (insn >> 8) & 0xf;
5439
                    rm = (insn) & 0xf;
5440
                    op1 = (insn >> 20) & 0xf;
5441
                    switch (op1) {
5442
                    case 0: case 1: case 2: case 3: case 6:
5443
                        /* 32 bit mul */
5444
                        gen_movl_T0_reg(s, rs);
5445
                        gen_movl_T1_reg(s, rm);
5446
                        gen_op_mul_T0_T1();
5447
                        if (insn & (1 << 22)) {
5448
                            /* Subtract (mls) */
5449
                            ARCH(6T2);
5450
                            gen_movl_T1_reg(s, rn);
5451
                            gen_op_rsbl_T0_T1();
5452
                        } else if (insn & (1 << 21)) {
5453
                            /* Add */
5454
                            gen_movl_T1_reg(s, rn);
5455
                            gen_op_addl_T0_T1();
5456
                        }
5457
                        if (insn & (1 << 20))
5458
                            gen_op_logic_T0_cc();
5459
                        gen_movl_reg_T0(s, rd);
5460
                        break;
5461
                    default:
5462
                        /* 64 bit mul */
5463
                        gen_movl_T0_reg(s, rs);
5464
                        gen_movl_T1_reg(s, rm);
5465
                        if (insn & (1 << 22))
5466
                            gen_op_imull_T0_T1();
5467
                        else
5468
                            gen_op_mull_T0_T1();
5469
                        if (insn & (1 << 21)) /* mult accumulate */
5470
                            gen_op_addq_T0_T1(rn, rd);
5471
                        if (!(insn & (1 << 23))) { /* double accumulate */
5472
                            ARCH(6);
5473
                            gen_op_addq_lo_T0_T1(rn);
5474
                            gen_op_addq_lo_T0_T1(rd);
5475
                        }
5476
                        if (insn & (1 << 20))
5477
                            gen_op_logicq_cc();
5478
                        gen_movl_reg_T0(s, rn);
5479
                        gen_movl_reg_T1(s, rd);
5480
                        break;
5481
                    }
5482
                } else {
5483
                    rn = (insn >> 16) & 0xf;
5484
                    rd = (insn >> 12) & 0xf;
5485
                    if (insn & (1 << 23)) {
5486
                        /* load/store exclusive */
5487
                        gen_movl_T1_reg(s, rn);
5488
                        if (insn & (1 << 20)) {
5489
                            gen_ldst(ldlex, s);
5490
                        } else {
5491
                            rm = insn & 0xf;
5492
                            gen_movl_T0_reg(s, rm);
5493
                            gen_ldst(stlex, s);
5494
                        }
5495
                        gen_movl_reg_T0(s, rd);
5496
                    } else {
5497
                        /* SWP instruction */
5498
                        rm = (insn) & 0xf;
5499

    
5500
                        gen_movl_T0_reg(s, rm);
5501
                        gen_movl_T1_reg(s, rn);
5502
                        if (insn & (1 << 22)) {
5503
                            gen_ldst(swpb, s);
5504
                        } else {
5505
                            gen_ldst(swpl, s);
5506
                        }
5507
                        gen_movl_reg_T0(s, rd);
5508
                    }
5509
                }
5510
            } else {
5511
                int address_offset;
5512
                int load;
5513
                /* Misc load/store */
5514
                rn = (insn >> 16) & 0xf;
5515
                rd = (insn >> 12) & 0xf;
5516
                gen_movl_T1_reg(s, rn);
5517
                if (insn & (1 << 24))
5518
                    gen_add_datah_offset(s, insn, 0);
5519
                address_offset = 0;
5520
                if (insn & (1 << 20)) {
5521
                    /* load */
5522
                    switch(sh) {
5523
                    case 1:
5524
                        gen_ldst(lduw, s);
5525
                        break;
5526
                    case 2:
5527
                        gen_ldst(ldsb, s);
5528
                        break;
5529
                    default:
5530
                    case 3:
5531
                        gen_ldst(ldsw, s);
5532
                        break;
5533
                    }
5534
                    load = 1;
5535
                } else if (sh & 2) {
5536
                    /* doubleword */
5537
                    if (sh & 1) {
5538
                        /* store */
5539
                        gen_movl_T0_reg(s, rd);
5540
                        gen_ldst(stl, s);
5541
                        gen_op_addl_T1_im(4);
5542
                        gen_movl_T0_reg(s, rd + 1);
5543
                        gen_ldst(stl, s);
5544
                        load = 0;
5545
                    } else {
5546
                        /* load */
5547
                        gen_ldst(ldl, s);
5548
                        gen_movl_reg_T0(s, rd);
5549
                        gen_op_addl_T1_im(4);
5550
                        gen_ldst(ldl, s);
5551
                        rd++;
5552
                        load = 1;
5553
                    }
5554
                    address_offset = -4;
5555
                } else {
5556
                    /* store */
5557
                    gen_movl_T0_reg(s, rd);
5558
                    gen_ldst(stw, s);
5559
                    load = 0;
5560
                }
5561
                /* Perform base writeback before the loaded value to
5562
                   ensure correct behavior with overlapping index registers.
5563
                   ldrd with base writeback is is undefined if the
5564
                   destination and index registers overlap.  */
5565
                if (!(insn & (1 << 24))) {
5566
                    gen_add_datah_offset(s, insn, address_offset);
5567
                    gen_movl_reg_T1(s, rn);
5568
                } else if (insn & (1 << 21)) {
5569
                    if (address_offset)
5570
                        gen_op_addl_T1_im(address_offset);
5571
                    gen_movl_reg_T1(s, rn);
5572
                }
5573
                if (load) {
5574
                    /* Complete the load.  */
5575
                    gen_movl_reg_T0(s, rd);
5576
                }
5577
            }
5578
            break;
5579
        case 0x4:
5580
        case 0x5:
5581
            goto do_ldst;
5582
        case 0x6:
5583
        case 0x7:
5584
            if (insn & (1 << 4)) {
5585
                ARCH(6);
5586
                /* Armv6 Media instructions.  */
5587
                rm = insn & 0xf;
5588
                rn = (insn >> 16) & 0xf;
5589
                rd = (insn >> 12) & 0xf;
5590
                rs = (insn >> 8) & 0xf;
5591
                switch ((insn >> 23) & 3) {
5592
                case 0: /* Parallel add/subtract.  */
5593
                    op1 = (insn >> 20) & 7;
5594
                    gen_movl_T0_reg(s, rn);
5595
                    gen_movl_T1_reg(s, rm);
5596
                    sh = (insn >> 5) & 7;
5597
                    if ((op1 & 3) == 0 || sh == 5 || sh == 6)
5598
                        goto illegal_op;
5599
                    gen_arm_parallel_addsub[op1][sh]();
5600
                    gen_movl_reg_T0(s, rd);
5601
                    break;
5602
                case 1:
5603
                    if ((insn & 0x00700020) == 0) {
5604
                        /* Hafword pack.  */
5605
                        tmp = load_reg(s, rn);
5606
                        tmp2 = load_reg(s, rm);
5607
                        shift = (insn >> 7) & 0x1f;
5608
                        if (shift)
5609
                            tcg_gen_shli_i32(tmp2, tmp2, shift);
5610
                        if (insn & (1 << 6)) {
5611
                            /* pkhtb */
5612
                            tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
5613
                            tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
5614
                        } else {
5615
                            /* pkhbt */
5616
                            tcg_gen_andi_i32(tmp, tmp, 0xffff);
5617
                            tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
5618
                        }
5619
                        tcg_gen_or_i32(tmp, tmp, tmp2);
5620
                        store_reg(s, rd, tmp);
5621
                    } else if ((insn & 0x00200020) == 0x00200000) {
5622
                        /* [us]sat */
5623
                        gen_movl_T1_reg(s, rm);
5624
                        shift = (insn >> 7) & 0x1f;
5625
                        if (insn & (1 << 6)) {
5626
                            if (shift == 0)
5627
                                shift = 31;
5628
                            gen_op_sarl_T1_im(shift);
5629
                        } else {
5630
                            gen_op_shll_T1_im(shift);
5631
                        }
5632
                        sh = (insn >> 16) & 0x1f;
5633
                        if (sh != 0) {
5634
                            if (insn & (1 << 22))
5635
                                gen_op_usat_T1(sh);
5636
                            else
5637
                                gen_op_ssat_T1(sh);
5638
                        }
5639
                        gen_movl_T1_reg(s, rd);
5640
                    } else if ((insn & 0x00300fe0) == 0x00200f20) {
5641
                        /* [us]sat16 */
5642
                        gen_movl_T1_reg(s, rm);
5643
                        sh = (insn >> 16) & 0x1f;
5644
                        if (sh != 0) {
5645
                            if (insn & (1 << 22))
5646
                                gen_op_usat16_T1(sh);
5647
                            else
5648
                                gen_op_ssat16_T1(sh);
5649
                        }
5650
                        gen_movl_T1_reg(s, rd);
5651
                    } else if ((insn & 0x00700fe0) == 0x00000fa0) {
5652
                        /* Select bytes.  */
5653
                        gen_movl_T0_reg(s, rn);
5654
                        gen_movl_T1_reg(s, rm);
5655
                        gen_op_sel_T0_T1();
5656
                        gen_movl_reg_T0(s, rd);
5657
                    } else if ((insn & 0x000003e0) == 0x00000060) {
5658
                        gen_movl_T1_reg(s, rm);
5659
                        shift = (insn >> 10) & 3;
5660
                        /* ??? In many cases it's not neccessary to do a
5661
                           rotate, a shift is sufficient.  */
5662
                        if (shift != 0)
5663
                            gen_op_rorl_T1_im(shift * 8);
5664
                        op1 = (insn >> 20) & 7;
5665
                        switch (op1) {
5666
                        case 0: gen_sxtb16(cpu_T[1]); break;
5667
                        case 2: gen_sxtb(cpu_T[1]);   break;
5668
                        case 3: gen_sxth(cpu_T[1]);   break;
5669
                        case 4: gen_uxtb16(cpu_T[1]); break;
5670
                        case 6: gen_uxtb(cpu_T[1]);   break;
5671
                        case 7: gen_uxth(cpu_T[1]);   break;
5672
                        default: goto illegal_op;
5673
                        }
5674
                        if (rn != 15) {
5675
                            tmp = load_reg(s, rn);
5676
                            if ((op1 & 3) == 0) {
5677
                                gen_add16(cpu_T[1], tmp);
5678
                            } else {
5679
                                tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
5680
                                dead_tmp(tmp);
5681
                            }
5682
                        }
5683
                        gen_movl_reg_T1(s, rd);
5684
                    } else if ((insn & 0x003f0f60) == 0x003f0f20) {
5685
                        /* rev */
5686
                        gen_movl_T0_reg(s, rm);
5687
                        if (insn & (1 << 22)) {
5688
                            if (insn & (1 << 7)) {
5689
                                gen_revsh(cpu_T[0]);
5690
                            } else {
5691
                                ARCH(6T2);
5692
                                gen_helper_rbit(cpu_T[0], cpu_T[0]);
5693
                            }
5694
                        } else {
5695
                            if (insn & (1 << 7))
5696
                                gen_rev16(cpu_T[0]);
5697
                            else
5698
                                gen_op_rev_T0();
5699
                        }
5700
                        gen_movl_reg_T0(s, rd);
5701
                    } else {
5702
                        goto illegal_op;
5703
                    }
5704
                    break;
5705
                case 2: /* Multiplies (Type 3).  */
5706
                    gen_movl_T0_reg(s, rm);
5707
                    gen_movl_T1_reg(s, rs);
5708
                    if (insn & (1 << 20)) {
5709
                        /* Signed multiply most significant [accumulate].  */
5710
                        gen_op_imull_T0_T1();
5711
                        if (insn & (1 << 5))
5712
                            gen_op_roundqd_T0_T1();
5713
                        else
5714
                            gen_op_movl_T0_T1();
5715
                        if (rn != 15) {
5716
                            gen_movl_T1_reg(s, rn);
5717
                            if (insn & (1 << 6)) {
5718
                                gen_op_addl_T0_T1();
5719
                            } else {
5720
                                gen_op_rsbl_T0_T1();
5721
                            }
5722
                        }
5723
                        gen_movl_reg_T0(s, rd);
5724
                    } else {
5725
                        if (insn & (1 << 5))
5726
                            gen_swap_half(cpu_T[1]);
5727
                        gen_smul_dual(cpu_T[0], cpu_T[1]);
5728
                        if (insn & (1 << 22)) {
5729
                            if (insn & (1 << 6)) {
5730
                                /* smlald */
5731
                                gen_op_addq_T0_T1_dual(rn, rd);
5732
                            } else {
5733
                                /* smlsld */
5734
                                gen_op_subq_T0_T1_dual(rn, rd);
5735
                            }
5736
                        } else {
5737
                            /* This addition cannot overflow.  */
5738
                            if (insn & (1 << 6)) {
5739
                                /* sm[ul]sd */
5740
                                gen_op_subl_T0_T1();
5741
                            } else {
5742
                                /* sm[ul]ad */
5743
                                gen_op_addl_T0_T1();
5744
                            }
5745
                            if (rn != 15)
5746
                              {
5747
                                gen_movl_T1_reg(s, rn);
5748
                                gen_op_addl_T0_T1_setq();
5749
                              }
5750
                            gen_movl_reg_T0(s, rd);
5751
                        }
5752
                    }
5753
                    break;
5754
                case 3:
5755
                    op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
5756
                    switch (op1) {
5757
                    case 0: /* Unsigned sum of absolute differences.  */
5758
                            goto illegal_op;
5759
                        gen_movl_T0_reg(s, rm);
5760
                        gen_movl_T1_reg(s, rs);
5761
                        gen_op_usad8_T0_T1();
5762
                        if (rn != 15) {
5763
                            gen_movl_T1_reg(s, rn);
5764
                            gen_op_addl_T0_T1();
5765
                        }
5766
                        gen_movl_reg_T0(s, rd);
5767
                        break;
5768
                    case 0x20: case 0x24: case 0x28: case 0x2c:
5769
                        /* Bitfield insert/clear.  */
5770
                        ARCH(6T2);
5771
                        shift = (insn >> 7) & 0x1f;
5772
                        i = (insn >> 16) & 0x1f;
5773
                        i = i + 1 - shift;
5774
                        if (rm == 15) {
5775
                            gen_op_movl_T1_im(0);
5776
                        } else {
5777
                            gen_movl_T1_reg(s, rm);
5778
                        }
5779
                        if (i != 32) {
5780
                            gen_movl_T0_reg(s, rd);
5781
                            gen_bfi(cpu_T[1], cpu_T[0], cpu_T[1],
5782
                                    shift, ((1u << i) - 1) << shift);
5783
                        }
5784
                        gen_movl_reg_T1(s, rd);
5785
                        break;
5786
                    case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
5787
                    case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
5788
                        gen_movl_T1_reg(s, rm);
5789
                        shift = (insn >> 7) & 0x1f;
5790
                        i = ((insn >> 16) & 0x1f) + 1;
5791
                        if (shift + i > 32)
5792
                            goto illegal_op;
5793
                        if (i < 32) {
5794
                            if (op1 & 0x20) {
5795
                                gen_ubfx(cpu_T[1], shift, (1u << i) - 1);
5796
                            } else {
5797
                                gen_sbfx(cpu_T[1], shift, i);
5798
                            }
5799
                        }
5800
                        gen_movl_reg_T1(s, rd);
5801
                        break;
5802
                    default:
5803
                        goto illegal_op;
5804
                    }
5805
                    break;
5806
                }
5807
                break;
5808
            }
5809
        do_ldst:
5810
            /* Check for undefined extension instructions
5811
             * per the ARM Bible IE:
5812
             * xxxx 0111 1111 xxxx  xxxx xxxx 1111 xxxx
5813
             */
5814
            sh = (0xf << 20) | (0xf << 4);
5815
            if (op1 == 0x7 && ((insn & sh) == sh))
5816
            {
5817
                goto illegal_op;
5818
            }
5819
            /* load/store byte/word */
5820
            rn = (insn >> 16) & 0xf;
5821
            rd = (insn >> 12) & 0xf;
5822
            gen_movl_T1_reg(s, rn);
5823
            i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
5824
            if (insn & (1 << 24))
5825
                gen_add_data_offset(s, insn);
5826
            if (insn & (1 << 20)) {
5827
                /* load */
5828
                s->is_mem = 1;
5829
#if defined(CONFIG_USER_ONLY)
5830
                if (insn & (1 << 22))
5831
                    gen_op_ldub_raw();
5832
                else
5833
                    gen_op_ldl_raw();
5834
#else
5835
                if (insn & (1 << 22)) {
5836
                    if (i)
5837
                        gen_op_ldub_user();
5838
                    else
5839
                        gen_op_ldub_kernel();
5840
                } else {
5841
                    if (i)
5842
                        gen_op_ldl_user();
5843
                    else
5844
                        gen_op_ldl_kernel();
5845
                }
5846
#endif
5847
            } else {
5848
                /* store */
5849
                gen_movl_T0_reg(s, rd);
5850
#if defined(CONFIG_USER_ONLY)
5851
                if (insn & (1 << 22))
5852
                    gen_op_stb_raw();
5853
                else
5854
                    gen_op_stl_raw();
5855
#else
5856
                if (insn & (1 << 22)) {
5857
                    if (i)
5858
                        gen_op_stb_user();
5859
                    else
5860
                        gen_op_stb_kernel();
5861
                } else {
5862
                    if (i)
5863
                        gen_op_stl_user();
5864
                    else
5865
                        gen_op_stl_kernel();
5866
                }
5867
#endif
5868
            }
5869
            if (!(insn & (1 << 24))) {
5870
                gen_add_data_offset(s, insn);
5871
                gen_movl_reg_T1(s, rn);
5872
            } else if (insn & (1 << 21))
5873
                gen_movl_reg_T1(s, rn); {
5874
            }
5875
            if (insn & (1 << 20)) {
5876
                /* Complete the load.  */
5877
                if (rd == 15)
5878
                    gen_bx(s);
5879
                else
5880
                    gen_movl_reg_T0(s, rd);
5881
            }
5882
            break;
5883
        case 0x08:
5884
        case 0x09:
5885
            {
5886
                int j, n, user, loaded_base;
5887
                /* load/store multiple words */
5888
                /* XXX: store correct base if write back */
5889
                user = 0;
5890
                if (insn & (1 << 22)) {
5891
                    if (IS_USER(s))
5892
                        goto illegal_op; /* only usable in supervisor mode */
5893

    
5894
                    if ((insn & (1 << 15)) == 0)
5895
                        user = 1;
5896
                }
5897
                rn = (insn >> 16) & 0xf;
5898
                gen_movl_T1_reg(s, rn);
5899

    
5900
                /* compute total size */
5901
                loaded_base = 0;
5902
                n = 0;
5903
                for(i=0;i<16;i++) {
5904
                    if (insn & (1 << i))
5905
                        n++;
5906
                }
5907
                /* XXX: test invalid n == 0 case ? */
5908
                if (insn & (1 << 23)) {
5909
                    if (insn & (1 << 24)) {
5910
                        /* pre increment */
5911
                        gen_op_addl_T1_im(4);
5912
                    } else {
5913
                        /* post increment */
5914
                    }
5915
                } else {
5916
                    if (insn & (1 << 24)) {
5917
                        /* pre decrement */
5918
                        gen_op_addl_T1_im(-(n * 4));
5919
                    } else {
5920
                        /* post decrement */
5921
                        if (n != 1)
5922
                            gen_op_addl_T1_im(-((n - 1) * 4));
5923
                    }
5924
                }
5925
                j = 0;
5926
                for(i=0;i<16;i++) {
5927
                    if (insn & (1 << i)) {
5928
                        if (insn & (1 << 20)) {
5929
                            /* load */
5930
                            gen_ldst(ldl, s);
5931
                            if (i == 15) {
5932
                                gen_bx(s);
5933
                            } else if (user) {
5934
                                gen_op_movl_user_T0(i);
5935
                            } else if (i == rn) {
5936
                                gen_op_movl_T2_T0();
5937
                                loaded_base = 1;
5938
                            } else {
5939
                                gen_movl_reg_T0(s, i);
5940
                            }
5941
                        } else {
5942
                            /* store */
5943
                            if (i == 15) {
5944
                                /* special case: r15 = PC + 8 */
5945
                                val = (long)s->pc + 4;
5946
                                gen_op_movl_T0_im(val);
5947
                            } else if (user) {
5948
                                gen_op_movl_T0_user(i);
5949
                            } else {
5950
                                gen_movl_T0_reg(s, i);
5951
                            }
5952
                            gen_ldst(stl, s);
5953
                        }
5954
                        j++;
5955
                        /* no need to add after the last transfer */
5956
                        if (j != n)
5957
                            gen_op_addl_T1_im(4);
5958
                    }
5959
                }
5960
                if (insn & (1 << 21)) {
5961
                    /* write back */
5962
                    if (insn & (1 << 23)) {
5963
                        if (insn & (1 << 24)) {
5964
                            /* pre increment */
5965
                        } else {
5966
                            /* post increment */
5967
                            gen_op_addl_T1_im(4);
5968
                        }
5969
                    } else {
5970
                        if (insn & (1 << 24)) {
5971
                            /* pre decrement */
5972
                            if (n != 1)
5973
                                gen_op_addl_T1_im(-((n - 1) * 4));
5974
                        } else {
5975
                            /* post decrement */
5976
                            gen_op_addl_T1_im(-(n * 4));
5977
                        }
5978
                    }
5979
                    gen_movl_reg_T1(s, rn);
5980
                }
5981
                if (loaded_base) {
5982
                    gen_op_movl_T0_T2();
5983
                    gen_movl_reg_T0(s, rn);
5984
                }
5985
                if ((insn & (1 << 22)) && !user) {
5986
                    /* Restore CPSR from SPSR.  */
5987
                    gen_op_movl_T0_spsr();
5988
                    gen_op_movl_cpsr_T0(0xffffffff);
5989
                    s->is_jmp = DISAS_UPDATE;
5990
                }
5991
            }
5992
            break;
5993
        case 0xa:
5994
        case 0xb:
5995
            {
5996
                int32_t offset;
5997

    
5998
                /* branch (and link) */
5999
                val = (int32_t)s->pc;
6000
                if (insn & (1 << 24)) {
6001
                    gen_op_movl_T0_im(val);
6002
                    gen_movl_reg_T0(s, 14);
6003
                }
6004
                offset = (((int32_t)insn << 8) >> 8);
6005
                val += (offset << 2) + 4;
6006
                gen_jmp(s, val);
6007
            }
6008
            break;
6009
        case 0xc:
6010
        case 0xd:
6011
        case 0xe:
6012
            /* Coprocessor.  */
6013
            if (disas_coproc_insn(env, s, insn))
6014
                goto illegal_op;
6015
            break;
6016
        case 0xf:
6017
            /* swi */
6018
            gen_op_movl_T0_im((long)s->pc);
6019
            gen_set_pc_T0();
6020
            s->is_jmp = DISAS_SWI;
6021
            break;
6022
        default:
6023
        illegal_op:
6024
            gen_set_condexec(s);
6025
            gen_op_movl_T0_im((long)s->pc - 4);
6026
            gen_set_pc_T0();
6027
            gen_op_undef_insn();
6028
            s->is_jmp = DISAS_JUMP;
6029
            break;
6030
        }
6031
    }
6032
}
6033

    
6034
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    /* Opcodes 8 and above are the arithmetic group (add/adc/sbc/sub/rsb);
       everything below is a logical operation (and/bic/orr/orn/eor/...).  */
    if (op > 7) {
        return 0;
    }
    return 1;
}
6040

    
6041
/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
6042
   then set condition code flags based on the result of the operation.
6043
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6044
   to the high bit of T1.
6045
   Returns zero if the opcode is valid.  */
6046

    
6047
static int
6048
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6049
{
6050
    int logic_cc;
6051

    
6052
    logic_cc = 0;
6053
    switch (op) {
6054
    case 0: /* and */
6055
        gen_op_andl_T0_T1();
6056
        logic_cc = conds;
6057
        break;
6058
    case 1: /* bic */
6059
        gen_op_bicl_T0_T1();
6060
        logic_cc = conds;
6061
        break;
6062
    case 2: /* orr */
6063
        gen_op_orl_T0_T1();
6064
        logic_cc = conds;
6065
        break;
6066
    case 3: /* orn */
6067
        gen_op_notl_T1();
6068
        gen_op_orl_T0_T1();
6069
        logic_cc = conds;
6070
        break;
6071
    case 4: /* eor */
6072
        gen_op_xorl_T0_T1();
6073
        logic_cc = conds;
6074
        break;
6075
    case 8: /* add */
6076
        if (conds)
6077
            gen_op_addl_T0_T1_cc();
6078
        else
6079
            gen_op_addl_T0_T1();
6080
        break;
6081
    case 10: /* adc */
6082
        if (conds)
6083
            gen_op_adcl_T0_T1_cc();
6084
        else
6085
            gen_adc_T0_T1();
6086
        break;
6087
    case 11: /* sbc */
6088
        if (conds)
6089
            gen_op_sbcl_T0_T1_cc();
6090
        else
6091
            gen_sbc_T0_T1();
6092
        break;
6093
    case 13: /* sub */
6094
        if (conds)
6095
            gen_op_subl_T0_T1_cc();
6096
        else
6097
            gen_op_subl_T0_T1();
6098
        break;
6099
    case 14: /* rsb */
6100
        if (conds)
6101
            gen_op_rsbl_T0_T1_cc();
6102
        else
6103
            gen_op_rsbl_T0_T1();
6104
        break;
6105
    default: /* 5, 6, 7, 9, 12, 15. */
6106
        return 1;
6107
    }
6108
    if (logic_cc) {
6109
        gen_op_logic_T0_cc();
6110
        if (shifter_out)
6111
            gen_set_CF_bit31(cpu_T[1]);
6112
    }
6113
    return 0;
6114
}
6115

    
6116
/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
6117
   is not legal.  */
6118
static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6119
{
6120
    uint32_t insn, imm, shift, offset, addr;
6121
    uint32_t rd, rn, rm, rs;
6122
    TCGv tmp;
6123
    int op;
6124
    int shiftop;
6125
    int conds;
6126
    int logic_cc;
6127

    
6128
    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
6129
          || arm_feature (env, ARM_FEATURE_M))) {
6130
        /* Thumb-1 cores may need to tread bl and blx as a pair of
6131
           16-bit instructions to get correct prefetch abort behavior.  */
6132
        insn = insn_hw1;
6133
        if ((insn & (1 << 12)) == 0) {
6134
            /* Second half of blx.  */
6135
            offset = ((insn & 0x7ff) << 1);
6136
            gen_movl_T0_reg(s, 14);
6137
            gen_op_movl_T1_im(offset);
6138
            gen_op_addl_T0_T1();
6139
            gen_op_movl_T1_im(0xfffffffc);
6140
            gen_op_andl_T0_T1();
6141

    
6142
            addr = (uint32_t)s->pc;
6143
            gen_op_movl_T1_im(addr | 1);
6144
            gen_movl_reg_T1(s, 14);
6145
            gen_bx(s);
6146
            return 0;
6147
        }
6148
        if (insn & (1 << 11)) {
6149
            /* Second half of bl.  */
6150
            offset = ((insn & 0x7ff) << 1) | 1;
6151
            gen_movl_T0_reg(s, 14);
6152
            gen_op_movl_T1_im(offset);
6153
            gen_op_addl_T0_T1();
6154

    
6155
            addr = (uint32_t)s->pc;
6156
            gen_op_movl_T1_im(addr | 1);
6157
            gen_movl_reg_T1(s, 14);
6158
            gen_bx(s);
6159
            return 0;
6160
        }
6161
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
6162
            /* Instruction spans a page boundary.  Implement it as two
6163
               16-bit instructions in case the second half causes an
6164
               prefetch abort.  */
6165
            offset = ((int32_t)insn << 21) >> 9;
6166
            addr = s->pc + 2 + offset;
6167
            gen_op_movl_T0_im(addr);
6168
            gen_movl_reg_T0(s, 14);
6169
            return 0;
6170
        }
6171
        /* Fall through to 32-bit decode.  */
6172
    }
6173

    
6174
    insn = lduw_code(s->pc);
6175
    s->pc += 2;
6176
    insn |= (uint32_t)insn_hw1 << 16;
6177

    
6178
    if ((insn & 0xf800e800) != 0xf000e800) {
6179
        ARCH(6T2);
6180
    }
6181

    
6182
    rn = (insn >> 16) & 0xf;
6183
    rs = (insn >> 12) & 0xf;
6184
    rd = (insn >> 8) & 0xf;
6185
    rm = insn & 0xf;
6186
    switch ((insn >> 25) & 0xf) {
6187
    case 0: case 1: case 2: case 3:
6188
        /* 16-bit instructions.  Should never happen.  */
6189
        abort();
6190
    case 4:
6191
        if (insn & (1 << 22)) {
6192
            /* Other load/store, table branch.  */
6193
            if (insn & 0x01200000) {
6194
                /* Load/store doubleword.  */
6195
                if (rn == 15) {
6196
                    gen_op_movl_T1_im(s->pc & ~3);
6197
                } else {
6198
                    gen_movl_T1_reg(s, rn);
6199
                }
6200
                offset = (insn & 0xff) * 4;
6201
                if ((insn & (1 << 23)) == 0)
6202
                    offset = -offset;
6203
                if (insn & (1 << 24)) {
6204
                    gen_op_addl_T1_im(offset);
6205
                    offset = 0;
6206
                }
6207
                if (insn & (1 << 20)) {
6208
                    /* ldrd */
6209
                    gen_ldst(ldl, s);
6210
                    gen_movl_reg_T0(s, rs);
6211
                    gen_op_addl_T1_im(4);
6212
                    gen_ldst(ldl, s);
6213
                    gen_movl_reg_T0(s, rd);
6214
                } else {
6215
                    /* strd */
6216
                    gen_movl_T0_reg(s, rs);
6217
                    gen_ldst(stl, s);
6218
                    gen_op_addl_T1_im(4);
6219
                    gen_movl_T0_reg(s, rd);
6220
                    gen_ldst(stl, s);
6221
                }
6222
                if (insn & (1 << 21)) {
6223
                    /* Base writeback.  */
6224
                    if (rn == 15)
6225
                        goto illegal_op;
6226
                    gen_op_addl_T1_im(offset - 4);
6227
                    gen_movl_reg_T1(s, rn);
6228
                }
6229
            } else if ((insn & (1 << 23)) == 0) {
6230
                /* Load/store exclusive word.  */
6231
                gen_movl_T0_reg(s, rd);
6232
                gen_movl_T1_reg(s, rn);
6233
                if (insn & (1 << 20)) {
6234
                    gen_ldst(ldlex, s);
6235
                } else {
6236
                    gen_ldst(stlex, s);
6237
                }
6238
                gen_movl_reg_T0(s, rd);
6239
            } else if ((insn & (1 << 6)) == 0) {
6240
                /* Table Branch.  */
6241
                if (rn == 15) {
6242
                    gen_op_movl_T1_im(s->pc);
6243
                } else {
6244
                    gen_movl_T1_reg(s, rn);
6245
                }
6246
                tmp = load_reg(s, rm);
6247
                tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
6248
                if (insn & (1 << 4)) {
6249
                    /* tbh */
6250
                    tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
6251
                    dead_tmp(tmp);
6252
                    gen_ldst(lduw, s);
6253
                } else { /* tbb */
6254
                    dead_tmp(tmp);
6255
                    gen_ldst(ldub, s);
6256
                }
6257
                tcg_gen_shli_i32(cpu_T[0], cpu_T[0], 1);
6258
                tcg_gen_addi_i32(cpu_T[0], cpu_T[0], s->pc);
6259
                gen_movl_reg_T0(s, 15);
6260
            } else {
6261
                /* Load/store exclusive byte/halfword/doubleword.  */
6262
                op = (insn >> 4) & 0x3;
6263
                gen_movl_T1_reg(s, rn);
6264
                if (insn & (1 << 20)) {
6265
                    switch (op) {
6266
                    case 0:
6267
                        gen_ldst(ldbex, s);
6268
                        break;
6269
                    case 1:
6270
                        gen_ldst(ldwex, s);
6271
                        break;
6272
                    case 3:
6273
                        gen_ldst(ldqex, s);
6274
                        gen_movl_reg_T1(s, rd);
6275
                        break;
6276
                    default:
6277
                        goto illegal_op;
6278
                    }
6279
                    gen_movl_reg_T0(s, rs);
6280
                } else {
6281
                    gen_movl_T0_reg(s, rs);
6282
                    switch (op) {
6283
                    case 0:
6284
                        gen_ldst(stbex, s);
6285
                        break;
6286
                    case 1:
6287
                        gen_ldst(stwex, s);
6288
                        break;
6289
                    case 3:
6290
                        gen_movl_T2_reg(s, rd);
6291
                        gen_ldst(stqex, s);
6292
                        break;
6293
                    default:
6294
                        goto illegal_op;
6295
                    }
6296
                    gen_movl_reg_T0(s, rm);
6297
                }
6298
            }
6299
        } else {
6300
            /* Load/store multiple, RFE, SRS.  */
6301
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
6302
                /* Not available in user mode.  */
6303
                if (!IS_USER(s))
6304
                    goto illegal_op;
6305
                if (insn & (1 << 20)) {
6306
                    /* rfe */
6307
                    gen_movl_T1_reg(s, rn);
6308
                    if (insn & (1 << 24)) {
6309
                        gen_op_addl_T1_im(4);
6310
                    } else {
6311
                        gen_op_addl_T1_im(-4);
6312
                    }
6313
                    /* Load CPSR into T2 and PC into T0.  */
6314
                    gen_ldst(ldl, s);
6315
                    gen_op_movl_T2_T0();
6316
                    gen_op_addl_T1_im(-4);
6317
                    gen_ldst(ldl, s);
6318
                    if (insn & (1 << 21)) {
6319
                        /* Base writeback.  */
6320
                        if (insn & (1 << 24))
6321
                            gen_op_addl_T1_im(8);
6322
                        gen_movl_reg_T1(s, rn);
6323
                    }
6324
                    gen_rfe(s);
6325
                } else {
6326
                    /* srs */
6327
                    op = (insn & 0x1f);
6328
                    if (op == (env->uncached_cpsr & CPSR_M)) {
6329
                        gen_movl_T1_reg(s, 13);
6330
                    } else {
6331
                        gen_op_movl_T1_r13_banked(op);
6332
                    }
6333
                    if ((insn & (1 << 24)) == 0) {
6334
                        gen_op_addl_T1_im(-8);
6335
                    }
6336
                    gen_movl_T0_reg(s, 14);
6337
                    gen_ldst(stl, s);
6338
                    gen_op_movl_T0_cpsr();
6339
                    gen_op_addl_T1_im(4);
6340
                    gen_ldst(stl, s);
6341
                    if (insn & (1 << 21)) {
6342
                        if ((insn & (1 << 24)) == 0) {
6343
                            gen_op_addl_T1_im(-4);
6344
                        } else {
6345
                            gen_op_addl_T1_im(4);
6346
                        }
6347
                        if (op == (env->uncached_cpsr & CPSR_M)) {
6348
                            gen_movl_reg_T1(s, 13);
6349
                        } else {
6350
                            gen_op_movl_r13_T1_banked(op);
6351
                        }
6352
                    }
6353
                }
6354
            } else {
6355
                int i;
6356
                /* Load/store multiple.  */
6357
                gen_movl_T1_reg(s, rn);
6358
                offset = 0;
6359
                for (i = 0; i < 16; i++) {
6360
                    if (insn & (1 << i))
6361
                        offset += 4;
6362
                }
6363
                if (insn & (1 << 24)) {
6364
                    gen_op_addl_T1_im(-offset);
6365
                }
6366

    
6367
                for (i = 0; i < 16; i++) {
6368
                    if ((insn & (1 << i)) == 0)
6369
                        continue;
6370
                    if (insn & (1 << 20)) {
6371
                        /* Load.  */
6372
                        gen_ldst(ldl, s);
6373
                        if (i == 15) {
6374
                            gen_bx(s);
6375
                        } else {
6376
                            gen_movl_reg_T0(s, i);
6377
                        }
6378
                    } else {
6379
                        /* Store.  */
6380
                        gen_movl_T0_reg(s, i);
6381
                        gen_ldst(stl, s);
6382
                    }
6383
                    gen_op_addl_T1_im(4);
6384
                }
6385
                if (insn & (1 << 21)) {
6386
                    /* Base register writeback.  */
6387
                    if (insn & (1 << 24)) {
6388
                        gen_op_addl_T1_im(-offset);
6389
                    }
6390
                    /* Fault if writeback register is in register list.  */
6391
                    if (insn & (1 << rn))
6392
                        goto illegal_op;
6393
                    gen_movl_reg_T1(s, rn);
6394
                }
6395
            }
6396
        }
6397
        break;
6398
    case 5: /* Data processing register constant shift.  */
6399
        if (rn == 15)
6400
            gen_op_movl_T0_im(0);
6401
        else
6402
            gen_movl_T0_reg(s, rn);
6403
        gen_movl_T1_reg(s, rm);
6404
        op = (insn >> 21) & 0xf;
6405
        shiftop = (insn >> 4) & 3;
6406
        shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6407
        conds = (insn & (1 << 20)) != 0;
6408
        logic_cc = (conds && thumb2_logic_op(op));
6409
        gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
6410
        if (gen_thumb2_data_op(s, op, conds, 0))
6411
            goto illegal_op;
6412
        if (rd != 15)
6413
            gen_movl_reg_T0(s, rd);
6414
        break;
6415
    case 13: /* Misc data processing.  */
6416
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
6417
        if (op < 4 && (insn & 0xf000) != 0xf000)
6418
            goto illegal_op;
6419
        switch (op) {
6420
        case 0: /* Register controlled shift.  */
6421
            gen_movl_T0_reg(s, rm);
6422
            gen_movl_T1_reg(s, rn);
6423
            if ((insn & 0x70) != 0)
6424
                goto illegal_op;
6425
            op = (insn >> 21) & 3;
6426
            if (insn & (1 << 20)) {
6427
                gen_shift_T1_T0_cc[op]();
6428
                gen_op_logic_T1_cc();
6429
            } else {
6430
                gen_shift_T1_T0[op]();
6431
            }
6432
            gen_movl_reg_T1(s, rd);
6433
            break;
6434
        case 1: /* Sign/zero extend.  */
6435
            gen_movl_T1_reg(s, rm);
6436
            shift = (insn >> 4) & 3;
6437
            /* ??? In many cases it's not neccessary to do a
6438
               rotate, a shift is sufficient.  */
6439
            if (shift != 0)
6440
                gen_op_rorl_T1_im(shift * 8);
6441
            op = (insn >> 20) & 7;
6442
            switch (op) {
6443
            case 0: gen_sxth(cpu_T[1]);   break;
6444
            case 1: gen_uxth(cpu_T[1]);   break;
6445
            case 2: gen_sxtb16(cpu_T[1]); break;
6446
            case 3: gen_uxtb16(cpu_T[1]); break;
6447
            case 4: gen_sxtb(cpu_T[1]);   break;
6448
            case 5: gen_uxtb(cpu_T[1]);   break;
6449
            default: goto illegal_op;
6450
            }
6451
            if (rn != 15) {
6452
                tmp = load_reg(s, rn);
6453
                if ((op >> 1) == 1) {
6454
                    gen_add16(cpu_T[1], tmp);
6455
                } else {
6456
                    tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
6457
                    dead_tmp(tmp);
6458
                }
6459
            }
6460
            gen_movl_reg_T1(s, rd);
6461
            break;
6462
        case 2: /* SIMD add/subtract.  */
6463
            op = (insn >> 20) & 7;
6464
            shift = (insn >> 4) & 7;
6465
            if ((op & 3) == 3 || (shift & 3) == 3)
6466
                goto illegal_op;
6467
            gen_movl_T0_reg(s, rn);
6468
            gen_movl_T1_reg(s, rm);
6469
            gen_thumb2_parallel_addsub[op][shift]();
6470
            gen_movl_reg_T0(s, rd);
6471
            break;
6472
        case 3: /* Other data processing.  */
6473
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
6474
            if (op < 4) {
6475
                /* Saturating add/subtract.  */
6476
                gen_movl_T0_reg(s, rm);
6477
                gen_movl_T1_reg(s, rn);
6478
                if (op & 2)
6479
                    gen_helper_double_saturate(cpu_T[1], cpu_T[1]);
6480
                if (op & 1)
6481
                    gen_op_subl_T0_T1_saturate();
6482
                else
6483
                    gen_op_addl_T0_T1_saturate();
6484
            } else {
6485
                gen_movl_T0_reg(s, rn);
6486
                switch (op) {
6487
                case 0x0a: /* rbit */
6488
                    gen_helper_rbit(cpu_T[0], cpu_T[0]);
6489
                    break;
6490
                case 0x08: /* rev */
6491
                    gen_op_rev_T0();
6492
                    break;
6493
                case 0x09: /* rev16 */
6494
                    gen_rev16(cpu_T[0]);
6495
                    break;
6496
                case 0x0b: /* revsh */
6497
                    gen_revsh(cpu_T[0]);
6498
                    break;
6499
                case 0x10: /* sel */
6500
                    gen_movl_T1_reg(s, rm);
6501
                    gen_op_sel_T0_T1();
6502
                    break;
6503
                case 0x18: /* clz */
6504
                    gen_helper_clz(cpu_T[0], cpu_T[0]);
6505
                    break;
6506
                default:
6507
                    goto illegal_op;
6508
                }
6509
            }
6510
            gen_movl_reg_T0(s, rd);
6511
            break;
6512
        case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
6513
            op = (insn >> 4) & 0xf;
6514
            gen_movl_T0_reg(s, rn);
6515
            gen_movl_T1_reg(s, rm);
6516
            switch ((insn >> 20) & 7) {
6517
            case 0: /* 32 x 32 -> 32 */
6518
                gen_op_mul_T0_T1();
6519
                if (rs != 15) {
6520
                    gen_movl_T1_reg(s, rs);
6521
                    if (op)
6522
                        gen_op_rsbl_T0_T1();
6523
                    else
6524
                        gen_op_addl_T0_T1();
6525
                }
6526
                gen_movl_reg_T0(s, rd);
6527
                break;
6528
            case 1: /* 16 x 16 -> 32 */
6529
                gen_mulxy(op & 2, op & 1);
6530
                if (rs != 15) {
6531
                    gen_movl_T1_reg(s, rs);
6532
                    gen_op_addl_T0_T1_setq();
6533
                }
6534
                gen_movl_reg_T0(s, rd);
6535
                break;
6536
            case 2: /* Dual multiply add.  */
6537
            case 4: /* Dual multiply subtract.  */
6538
                if (op)
6539
                    gen_swap_half(cpu_T[1]);
6540
                gen_smul_dual(cpu_T[0], cpu_T[1]);
6541
                /* This addition cannot overflow.  */
6542
                if (insn & (1 << 22)) {
6543
                    gen_op_subl_T0_T1();
6544
                } else {
6545
                    gen_op_addl_T0_T1();
6546
                }
6547
                if (rs != 15)
6548
                  {
6549
                    gen_movl_T1_reg(s, rs);
6550
                    gen_op_addl_T0_T1_setq();
6551
                  }
6552
                gen_movl_reg_T0(s, rd);
6553
                break;
6554
            case 3: /* 32 * 16 -> 32msb */
6555
                if (op)
6556
                    gen_op_sarl_T1_im(16);
6557
                else
6558
                    gen_sxth(cpu_T[1]);
6559
                gen_op_imulw_T0_T1();
6560
                if (rs != 15)
6561
                  {
6562
                    gen_movl_T1_reg(s, rs);
6563
                    gen_op_addl_T0_T1_setq();
6564
                  }
6565
                gen_movl_reg_T0(s, rd);
6566
                break;
6567
            case 5: case 6: /* 32 * 32 -> 32msb */
6568
                gen_op_imull_T0_T1();
6569
                if (insn & (1 << 5))
6570
                    gen_op_roundqd_T0_T1();
6571
                else
6572
                    gen_op_movl_T0_T1();
6573
                if (rs != 15) {
6574
                    gen_movl_T1_reg(s, rs);
6575
                    if (insn & (1 << 21)) {
6576
                        gen_op_addl_T0_T1();
6577
                    } else {
6578
                        gen_op_rsbl_T0_T1();
6579
                    }
6580
                }
6581
                gen_movl_reg_T0(s, rd);
6582
                break;
6583
            case 7: /* Unsigned sum of absolute differences.  */
6584
                gen_op_usad8_T0_T1();
6585
                if (rs != 15) {
6586
                    gen_movl_T1_reg(s, rs);
6587
                    gen_op_addl_T0_T1();
6588
                }
6589
                gen_movl_reg_T0(s, rd);
6590
                break;
6591
            }
6592
            break;
6593
        case 6: case 7: /* 64-bit multiply, Divide.  */
6594
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
6595
            gen_movl_T0_reg(s, rn);
6596
            gen_movl_T1_reg(s, rm);
6597
            if ((op & 0x50) == 0x10) {
6598
                /* sdiv, udiv */
6599
                if (!arm_feature(env, ARM_FEATURE_DIV))
6600
                    goto illegal_op;
6601
                if (op & 0x20)
6602
                    gen_helper_udiv(cpu_T[0], cpu_T[0], cpu_T[1]);
6603
                else
6604
                    gen_helper_sdiv(cpu_T[0], cpu_T[0], cpu_T[1]);
6605
                gen_movl_reg_T0(s, rd);
6606
            } else if ((op & 0xe) == 0xc) {
6607
                /* Dual multiply accumulate long.  */
6608
                if (op & 1)
6609
                    gen_swap_half(cpu_T[1]);
6610
                gen_smul_dual(cpu_T[0], cpu_T[1]);
6611
                if (op & 0x10) {
6612
                    gen_op_subl_T0_T1();
6613
                } else {
6614
                    gen_op_addl_T0_T1();
6615
                }
6616
                gen_op_signbit_T1_T0();
6617
                gen_op_addq_T0_T1(rs, rd);
6618
                gen_movl_reg_T0(s, rs);
6619
                gen_movl_reg_T1(s, rd);
6620
            } else {
6621
                if (op & 0x20) {
6622
                    /* Unsigned 64-bit multiply  */
6623
                    gen_op_mull_T0_T1();
6624
                } else {
6625
                    if (op & 8) {
6626
                        /* smlalxy */
6627
                        gen_mulxy(op & 2, op & 1);
6628
                        gen_op_signbit_T1_T0();
6629
                    } else {
6630
                        /* Signed 64-bit multiply  */
6631
                        gen_op_imull_T0_T1();
6632
                    }
6633
                }
6634
                if (op & 4) {
6635
                    /* umaal */
6636
                    gen_op_addq_lo_T0_T1(rs);
6637
                    gen_op_addq_lo_T0_T1(rd);
6638
                } else if (op & 0x40) {
6639
                    /* 64-bit accumulate.  */
6640
                    gen_op_addq_T0_T1(rs, rd);
6641
                }
6642
                gen_movl_reg_T0(s, rs);
6643
                gen_movl_reg_T1(s, rd);
6644
            }
6645
            break;
6646
        }
6647
        break;
6648
    case 6: case 7: case 14: case 15:
6649
        /* Coprocessor.  */
6650
        if (((insn >> 24) & 3) == 3) {
6651
            /* Translate into the equivalent ARM encoding.  */
6652
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
6653
            if (disas_neon_data_insn(env, s, insn))
6654
                goto illegal_op;
6655
        } else {
6656
            if (insn & (1 << 28))
6657
                goto illegal_op;
6658
            if (disas_coproc_insn (env, s, insn))
6659
                goto illegal_op;
6660
        }
6661
        break;
6662
    case 8: case 9: case 10: case 11:
6663
        if (insn & (1 << 15)) {
6664
            /* Branches, misc control.  */
6665
            if (insn & 0x5000) {
6666
                /* Unconditional branch.  */
6667
                /* signextend(hw1[10:0]) -> offset[:12].  */
6668
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
6669
                /* hw1[10:0] -> offset[11:1].  */
6670
                offset |= (insn & 0x7ff) << 1;
6671
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
6672
                   offset[24:22] already have the same value because of the
6673
                   sign extension above.  */
6674
                offset ^= ((~insn) & (1 << 13)) << 10;
6675
                offset ^= ((~insn) & (1 << 11)) << 11;
6676

    
6677
                addr = s->pc;
6678
                if (insn & (1 << 14)) {
6679
                    /* Branch and link.  */
6680
                    gen_op_movl_T1_im(addr | 1);
6681
                    gen_movl_reg_T1(s, 14);
6682
                }
6683

    
6684
                addr += offset;
6685
                if (insn & (1 << 12)) {
6686
                    /* b/bl */
6687
                    gen_jmp(s, addr);
6688
                } else {
6689
                    /* blx */
6690
                    addr &= ~(uint32_t)2;
6691
                    gen_op_movl_T0_im(addr);
6692
                    gen_bx(s);
6693
                }
6694
            } else if (((insn >> 23) & 7) == 7) {
6695
                /* Misc control */
6696
                if (insn & (1 << 13))
6697
                    goto illegal_op;
6698

    
6699
                if (insn & (1 << 26)) {
6700
                    /* Secure monitor call (v6Z) */
6701
                    goto illegal_op; /* not implemented.  */
6702
                } else {
6703
                    op = (insn >> 20) & 7;
6704
                    switch (op) {
6705
                    case 0: /* msr cpsr.  */
6706
                        if (IS_M(env)) {
6707
                            gen_op_v7m_msr_T0(insn & 0xff);
6708
                            gen_movl_reg_T0(s, rn);
6709
                            gen_lookup_tb(s);
6710
                            break;
6711
                        }
6712
                        /* fall through */
6713
                    case 1: /* msr spsr.  */
6714
                        if (IS_M(env))
6715
                            goto illegal_op;
6716
                        gen_movl_T0_reg(s, rn);
6717
                        if (gen_set_psr_T0(s,
6718
                              msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
6719
                              op == 1))
6720
                            goto illegal_op;
6721
                        break;
6722
                    case 2: /* cps, nop-hint.  */
6723
                        if (((insn >> 8) & 7) == 0) {
6724
                            gen_nop_hint(s, insn & 0xff);
6725
                        }
6726
                        /* Implemented as NOP in user mode.  */
6727
                        if (IS_USER(s))
6728
                            break;
6729
                        offset = 0;
6730
                        imm = 0;
6731
                        if (insn & (1 << 10)) {
6732
                            if (insn & (1 << 7))
6733
                                offset |= CPSR_A;
6734
                            if (insn & (1 << 6))
6735
                                offset |= CPSR_I;
6736
                            if (insn & (1 << 5))
6737
                                offset |= CPSR_F;
6738
                            if (insn & (1 << 9))
6739
                                imm = CPSR_A | CPSR_I | CPSR_F;
6740
                        }
6741
                        if (insn & (1 << 8)) {
6742
                            offset |= 0x1f;
6743
                            imm |= (insn & 0x1f);
6744
                        }
6745
                        if (offset) {
6746
                            gen_op_movl_T0_im(imm);
6747
                            gen_set_psr_T0(s, offset, 0);
6748
                        }
6749
                        break;
6750
                    case 3: /* Special control operations.  */
6751
                        op = (insn >> 4) & 0xf;
6752
                        switch (op) {
6753
                        case 2: /* clrex */
6754
                            gen_op_clrex();
6755
                            break;
6756
                        case 4: /* dsb */
6757
                        case 5: /* dmb */
6758
                        case 6: /* isb */
6759
                            /* These execute as NOPs.  */
6760
                            ARCH(7);
6761
                            break;
6762
                        default:
6763
                            goto illegal_op;
6764
                        }
6765
                        break;
6766
                    case 4: /* bxj */
6767
                        /* Trivial implementation equivalent to bx.  */
6768
                        gen_movl_T0_reg(s, rn);
6769
                        gen_bx(s);
6770
                        break;
6771
                    case 5: /* Exception return.  */
6772
                        /* Unpredictable in user mode.  */
6773
                        goto illegal_op;
6774
                    case 6: /* mrs cpsr.  */
6775
                        if (IS_M(env)) {
6776
                            gen_op_v7m_mrs_T0(insn & 0xff);
6777
                        } else {
6778
                            gen_op_movl_T0_cpsr();
6779
                        }
6780
                        gen_movl_reg_T0(s, rd);
6781
                        break;
6782
                    case 7: /* mrs spsr.  */
6783
                        /* Not accessible in user mode.  */
6784
                        if (IS_USER(s) || IS_M(env))
6785
                            goto illegal_op;
6786
                        gen_op_movl_T0_spsr();
6787
                        gen_movl_reg_T0(s, rd);
6788
                        break;
6789
                    }
6790
                }
6791
            } else {
6792
                /* Conditional branch.  */
6793
                op = (insn >> 22) & 0xf;
6794
                /* Generate a conditional jump to next instruction.  */
6795
                s->condlabel = gen_new_label();
6796
                gen_test_cc[op ^ 1](s->condlabel);
6797
                s->condjmp = 1;
6798

    
6799
                /* offset[11:1] = insn[10:0] */
6800
                offset = (insn & 0x7ff) << 1;
6801
                /* offset[17:12] = insn[21:16].  */
6802
                offset |= (insn & 0x003f0000) >> 4;
6803
                /* offset[31:20] = insn[26].  */
6804
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
6805
                /* offset[18] = insn[13].  */
6806
                offset |= (insn & (1 << 13)) << 5;
6807
                /* offset[19] = insn[11].  */
6808
                offset |= (insn & (1 << 11)) << 8;
6809

    
6810
                /* jump to the offset */
6811
                addr = s->pc + offset;
6812
                gen_jmp(s, addr);
6813
            }
6814
        } else {
6815
            /* Data processing immediate.  */
6816
            if (insn & (1 << 25)) {
6817
                if (insn & (1 << 24)) {
6818
                    if (insn & (1 << 20))
6819
                        goto illegal_op;
6820
                    /* Bitfield/Saturate.  */
6821
                    op = (insn >> 21) & 7;
6822
                    imm = insn & 0x1f;
6823
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6824
                    if (rn == 15)
6825
                        gen_op_movl_T1_im(0);
6826
                    else
6827
                        gen_movl_T1_reg(s, rn);
6828
                    switch (op) {
6829
                    case 2: /* Signed bitfield extract.  */
6830
                        imm++;
6831
                        if (shift + imm > 32)
6832
                            goto illegal_op;
6833
                        if (imm < 32)
6834
                            gen_sbfx(cpu_T[1], shift, imm);
6835
                        break;
6836
                    case 6: /* Unsigned bitfield extract.  */
6837
                        imm++;
6838
                        if (shift + imm > 32)
6839
                            goto illegal_op;
6840
                        if (imm < 32)
6841
                            gen_ubfx(cpu_T[1], shift, (1u << imm) - 1);
6842
                        break;
6843
                    case 3: /* Bitfield insert/clear.  */
6844
                        if (imm < shift)
6845
                            goto illegal_op;
6846
                        imm = imm + 1 - shift;
6847
                        if (imm != 32) {
6848
                            gen_movl_T0_reg(s, rd);
6849
                            gen_bfi(cpu_T[1], cpu_T[0], cpu_T[1],
6850
                                    shift, ((1u << imm) - 1) << shift);
6851
                        }
6852
                        break;
6853
                    case 7:
6854
                        goto illegal_op;
6855
                    default: /* Saturate.  */
6856
                        gen_movl_T1_reg(s, rn);
6857
                        if (shift) {
6858
                            if (op & 1)
6859
                                gen_op_sarl_T1_im(shift);
6860
                            else
6861
                                gen_op_shll_T1_im(shift);
6862
                        }
6863
                        if (op & 4) {
6864
                            /* Unsigned.  */
6865
                            gen_op_ssat_T1(imm);
6866
                            if ((op & 1) && shift == 0)
6867
                                gen_op_usat16_T1(imm);
6868
                            else
6869
                                gen_op_usat_T1(imm);
6870
                        } else {
6871
                            /* Signed.  */
6872
                            gen_op_ssat_T1(imm);
6873
                            if ((op & 1) && shift == 0)
6874
                                gen_op_ssat16_T1(imm);
6875
                            else
6876
                                gen_op_ssat_T1(imm);
6877
                        }
6878
                        break;
6879
                    }
6880
                    gen_movl_reg_T1(s, rd);
6881
                } else {
6882
                    imm = ((insn & 0x04000000) >> 15)
6883
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
6884
                    if (insn & (1 << 22)) {
6885
                        /* 16-bit immediate.  */
6886
                        imm |= (insn >> 4) & 0xf000;
6887
                        if (insn & (1 << 23)) {
6888
                            /* movt */
6889
                            gen_movl_T0_reg(s, rd);
6890
                            tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xffff);
6891
                            tcg_gen_ori_i32(cpu_T[0], cpu_T[0], imm << 16);
6892
                        } else {
6893
                            /* movw */
6894
                            gen_op_movl_T0_im(imm);
6895
                        }
6896
                    } else {
6897
                        /* Add/sub 12-bit immediate.  */
6898
                        if (rn == 15) {
6899
                            addr = s->pc & ~(uint32_t)3;
6900
                            if (insn & (1 << 23))
6901
                                addr -= imm;
6902
                            else
6903
                                addr += imm;
6904
                            gen_op_movl_T0_im(addr);
6905
                        } else {
6906
                            gen_movl_T0_reg(s, rn);
6907
                            gen_op_movl_T1_im(imm);
6908
                            if (insn & (1 << 23))
6909
                                gen_op_subl_T0_T1();
6910
                            else
6911
                                gen_op_addl_T0_T1();
6912
                        }
6913
                    }
6914
                    gen_movl_reg_T0(s, rd);
6915
                }
6916
            } else {
6917
                int shifter_out = 0;
6918
                /* modified 12-bit immediate.  */
6919
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
6920
                imm = (insn & 0xff);
6921
                switch (shift) {
6922
                case 0: /* XY */
6923
                    /* Nothing to do.  */
6924
                    break;
6925
                case 1: /* 00XY00XY */
6926
                    imm |= imm << 16;
6927
                    break;
6928
                case 2: /* XY00XY00 */
6929
                    imm |= imm << 16;
6930
                    imm <<= 8;
6931
                    break;
6932
                case 3: /* XYXYXYXY */
6933
                    imm |= imm << 16;
6934
                    imm |= imm << 8;
6935
                    break;
6936
                default: /* Rotated constant.  */
6937
                    shift = (shift << 1) | (imm >> 7);
6938
                    imm |= 0x80;
6939
                    imm = imm << (32 - shift);
6940
                    shifter_out = 1;
6941
                    break;
6942
                }
6943
                gen_op_movl_T1_im(imm);
6944
                rn = (insn >> 16) & 0xf;
6945
                if (rn == 15)
6946
                    gen_op_movl_T0_im(0);
6947
                else
6948
                    gen_movl_T0_reg(s, rn);
6949
                op = (insn >> 21) & 0xf;
6950
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
6951
                                       shifter_out))
6952
                    goto illegal_op;
6953
                rd = (insn >> 8) & 0xf;
6954
                if (rd != 15) {
6955
                    gen_movl_reg_T0(s, rd);
6956
                }
6957
            }
6958
        }
6959
        break;
6960
    case 12: /* Load/store single data item.  */
6961
        {
6962
        int postinc = 0;
6963
        int writeback = 0;
6964
        if ((insn & 0x01100000) == 0x01000000) {
6965
            if (disas_neon_ls_insn(env, s, insn))
6966
                goto illegal_op;
6967
            break;
6968
        }
6969
        if (rn == 15) {
6970
            /* PC relative.  */
6971
            /* s->pc has already been incremented by 4.  */
6972
            imm = s->pc & 0xfffffffc;
6973
            if (insn & (1 << 23))
6974
                imm += insn & 0xfff;
6975
            else
6976
                imm -= insn & 0xfff;
6977
            gen_op_movl_T1_im(imm);
6978
        } else {
6979
            gen_movl_T1_reg(s, rn);
6980
            if (insn & (1 << 23)) {
6981
                /* Positive offset.  */
6982
                imm = insn & 0xfff;
6983
                gen_op_addl_T1_im(imm);
6984
            } else {
6985
                op = (insn >> 8) & 7;
6986
                imm = insn & 0xff;
6987
                switch (op) {
6988
                case 0: case 8: /* Shifted Register.  */
6989
                    shift = (insn >> 4) & 0xf;
6990
                    if (shift > 3)
6991
                        goto illegal_op;
6992
                    tmp = load_reg(s, rm);
6993
                    if (shift)
6994
                        tcg_gen_shli_i32(tmp, tmp, shift);
6995
                    tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
6996
                    dead_tmp(tmp);
6997
                    break;
6998
                case 4: /* Negative offset.  */
6999
                    gen_op_addl_T1_im(-imm);
7000
                    break;
7001
                case 6: /* User privilege.  */
7002
                    gen_op_addl_T1_im(imm);
7003
                    break;
7004
                case 1: /* Post-decrement.  */
7005
                    imm = -imm;
7006
                    /* Fall through.  */
7007
                case 3: /* Post-increment.  */
7008
                    postinc = 1;
7009
                    writeback = 1;
7010
                    break;
7011
                case 5: /* Pre-decrement.  */
7012
                    imm = -imm;
7013
                    /* Fall through.  */
7014
                case 7: /* Pre-increment.  */
7015
                    gen_op_addl_T1_im(imm);
7016
                    writeback = 1;
7017
                    break;
7018
                default:
7019
                    goto illegal_op;
7020
                }
7021
            }
7022
        }
7023
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
7024
        if (insn & (1 << 20)) {
7025
            /* Load.  */
7026
            if (rs == 15 && op != 2) {
7027
                if (op & 2)
7028
                    goto illegal_op;
7029
                /* Memory hint.  Implemented as NOP.  */
7030
            } else {
7031
                switch (op) {
7032
                case 0: gen_ldst(ldub, s); break;
7033
                case 4: gen_ldst(ldsb, s); break;
7034
                case 1: gen_ldst(lduw, s); break;
7035
                case 5: gen_ldst(ldsw, s); break;
7036
                case 2: gen_ldst(ldl, s); break;
7037
                default: goto illegal_op;
7038
                }
7039
                if (rs == 15) {
7040
                    gen_bx(s);
7041
                } else {
7042
                    gen_movl_reg_T0(s, rs);
7043
                }
7044
            }
7045
        } else {
7046
            /* Store.  */
7047
            if (rs == 15)
7048
                goto illegal_op;
7049
            gen_movl_T0_reg(s, rs);
7050
            switch (op) {
7051
            case 0: gen_ldst(stb, s); break;
7052
            case 1: gen_ldst(stw, s); break;
7053
            case 2: gen_ldst(stl, s); break;
7054
            default: goto illegal_op;
7055
            }
7056
        }
7057
        if (postinc)
7058
            gen_op_addl_T1_im(imm);
7059
        if (writeback)
7060
            gen_movl_reg_T1(s, rn);
7061
        }
7062
        break;
7063
    default:
7064
        goto illegal_op;
7065
    }
7066
    return 0;
7067
illegal_op:
7068
    return 1;
7069
}
7070

    
7071
static void disas_thumb_insn(CPUState *env, DisasContext *s)
7072
{
7073
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
7074
    int32_t offset;
7075
    int i;
7076
    TCGv tmp;
7077

    
7078
    if (s->condexec_mask) {
7079
        cond = s->condexec_cond;
7080
        s->condlabel = gen_new_label();
7081
        gen_test_cc[cond ^ 1](s->condlabel);
7082
        s->condjmp = 1;
7083
    }
7084

    
7085
    insn = lduw_code(s->pc);
7086
    s->pc += 2;
7087

    
7088
    switch (insn >> 12) {
7089
    case 0: case 1:
7090
        rd = insn & 7;
7091
        op = (insn >> 11) & 3;
7092
        if (op == 3) {
7093
            /* add/subtract */
7094
            rn = (insn >> 3) & 7;
7095
            gen_movl_T0_reg(s, rn);
7096
            if (insn & (1 << 10)) {
7097
                /* immediate */
7098
                gen_op_movl_T1_im((insn >> 6) & 7);
7099
            } else {
7100
                /* reg */
7101
                rm = (insn >> 6) & 7;
7102
                gen_movl_T1_reg(s, rm);
7103
            }
7104
            if (insn & (1 << 9)) {
7105
                if (s->condexec_mask)
7106
                    gen_op_subl_T0_T1();
7107
                else
7108
                    gen_op_subl_T0_T1_cc();
7109
            } else {
7110
                if (s->condexec_mask)
7111
                    gen_op_addl_T0_T1();
7112
                else
7113
                    gen_op_addl_T0_T1_cc();
7114
            }
7115
            gen_movl_reg_T0(s, rd);
7116
        } else {
7117
            /* shift immediate */
7118
            rm = (insn >> 3) & 7;
7119
            shift = (insn >> 6) & 0x1f;
7120
            tmp = load_reg(s, rm);
7121
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
7122
            if (!s->condexec_mask)
7123
                gen_logic_CC(tmp);
7124
            store_reg(s, rd, tmp);
7125
        }
7126
        break;
7127
    case 2: case 3:
7128
        /* arithmetic large immediate */
7129
        op = (insn >> 11) & 3;
7130
        rd = (insn >> 8) & 0x7;
7131
        if (op == 0) {
7132
            gen_op_movl_T0_im(insn & 0xff);
7133
        } else {
7134
            gen_movl_T0_reg(s, rd);
7135
            gen_op_movl_T1_im(insn & 0xff);
7136
        }
7137
        switch (op) {
7138
        case 0: /* mov */
7139
            if (!s->condexec_mask)
7140
                gen_op_logic_T0_cc();
7141
            break;
7142
        case 1: /* cmp */
7143
            gen_op_subl_T0_T1_cc();
7144
            break;
7145
        case 2: /* add */
7146
            if (s->condexec_mask)
7147
                gen_op_addl_T0_T1();
7148
            else
7149
                gen_op_addl_T0_T1_cc();
7150
            break;
7151
        case 3: /* sub */
7152
            if (s->condexec_mask)
7153
                gen_op_subl_T0_T1();
7154
            else
7155
                gen_op_subl_T0_T1_cc();
7156
            break;
7157
        }
7158
        if (op != 1)
7159
            gen_movl_reg_T0(s, rd);
7160
        break;
7161
    case 4:
7162
        if (insn & (1 << 11)) {
7163
            rd = (insn >> 8) & 7;
7164
            /* load pc-relative.  Bit 1 of PC is ignored.  */
7165
            val = s->pc + 2 + ((insn & 0xff) * 4);
7166
            val &= ~(uint32_t)2;
7167
            gen_op_movl_T1_im(val);
7168
            gen_ldst(ldl, s);
7169
            gen_movl_reg_T0(s, rd);
7170
            break;
7171
        }
7172
        if (insn & (1 << 10)) {
7173
            /* data processing extended or blx */
7174
            rd = (insn & 7) | ((insn >> 4) & 8);
7175
            rm = (insn >> 3) & 0xf;
7176
            op = (insn >> 8) & 3;
7177
            switch (op) {
7178
            case 0: /* add */
7179
                gen_movl_T0_reg(s, rd);
7180
                gen_movl_T1_reg(s, rm);
7181
                gen_op_addl_T0_T1();
7182
                gen_movl_reg_T0(s, rd);
7183
                break;
7184
            case 1: /* cmp */
7185
                gen_movl_T0_reg(s, rd);
7186
                gen_movl_T1_reg(s, rm);
7187
                gen_op_subl_T0_T1_cc();
7188
                break;
7189
            case 2: /* mov/cpy */
7190
                gen_movl_T0_reg(s, rm);
7191
                gen_movl_reg_T0(s, rd);
7192
                break;
7193
            case 3:/* branch [and link] exchange thumb register */
7194
                if (insn & (1 << 7)) {
7195
                    val = (uint32_t)s->pc | 1;
7196
                    gen_op_movl_T1_im(val);
7197
                    gen_movl_reg_T1(s, 14);
7198
                }
7199
                gen_movl_T0_reg(s, rm);
7200
                gen_bx(s);
7201
                break;
7202
            }
7203
            break;
7204
        }
7205

    
7206
        /* data processing register */
7207
        rd = insn & 7;
7208
        rm = (insn >> 3) & 7;
7209
        op = (insn >> 6) & 0xf;
7210
        if (op == 2 || op == 3 || op == 4 || op == 7) {
7211
            /* the shift/rotate ops want the operands backwards */
7212
            val = rm;
7213
            rm = rd;
7214
            rd = val;
7215
            val = 1;
7216
        } else {
7217
            val = 0;
7218
        }
7219

    
7220
        if (op == 9) /* neg */
7221
            gen_op_movl_T0_im(0);
7222
        else if (op != 0xf) /* mvn doesn't read its first operand */
7223
            gen_movl_T0_reg(s, rd);
7224

    
7225
        gen_movl_T1_reg(s, rm);
7226
        switch (op) {
7227
        case 0x0: /* and */
7228
            gen_op_andl_T0_T1();
7229
            if (!s->condexec_mask)
7230
                gen_op_logic_T0_cc();
7231
            break;
7232
        case 0x1: /* eor */
7233
            gen_op_xorl_T0_T1();
7234
            if (!s->condexec_mask)
7235
                gen_op_logic_T0_cc();
7236
            break;
7237
        case 0x2: /* lsl */
7238
            if (s->condexec_mask) {
7239
                gen_op_shll_T1_T0();
7240
            } else {
7241
                gen_op_shll_T1_T0_cc();
7242
                gen_op_logic_T1_cc();
7243
            }
7244
            break;
7245
        case 0x3: /* lsr */
7246
            if (s->condexec_mask) {
7247
                gen_op_shrl_T1_T0();
7248
            } else {
7249
                gen_op_shrl_T1_T0_cc();
7250
                gen_op_logic_T1_cc();
7251
            }
7252
            break;
7253
        case 0x4: /* asr */
7254
            if (s->condexec_mask) {
7255
                gen_op_sarl_T1_T0();
7256
            } else {
7257
                gen_op_sarl_T1_T0_cc();
7258
                gen_op_logic_T1_cc();
7259
            }
7260
            break;
7261
        case 0x5: /* adc */
7262
            if (s->condexec_mask)
7263
                gen_adc_T0_T1();
7264
            else
7265
                gen_op_adcl_T0_T1_cc();
7266
            break;
7267
        case 0x6: /* sbc */
7268
            if (s->condexec_mask)
7269
                gen_sbc_T0_T1();
7270
            else
7271
                gen_op_sbcl_T0_T1_cc();
7272
            break;
7273
        case 0x7: /* ror */
7274
            if (s->condexec_mask) {
7275
                gen_op_rorl_T1_T0();
7276
            } else {
7277
                gen_op_rorl_T1_T0_cc();
7278
                gen_op_logic_T1_cc();
7279
            }
7280
            break;
7281
        case 0x8: /* tst */
7282
            gen_op_andl_T0_T1();
7283
            gen_op_logic_T0_cc();
7284
            rd = 16;
7285
            break;
7286
        case 0x9: /* neg */
7287
            if (s->condexec_mask)
7288
                gen_op_subl_T0_T1();
7289
            else
7290
                gen_op_subl_T0_T1_cc();
7291
            break;
7292
        case 0xa: /* cmp */
7293
            gen_op_subl_T0_T1_cc();
7294
            rd = 16;
7295
            break;
7296
        case 0xb: /* cmn */
7297
            gen_op_addl_T0_T1_cc();
7298
            rd = 16;
7299
            break;
7300
        case 0xc: /* orr */
7301
            gen_op_orl_T0_T1();
7302
            if (!s->condexec_mask)
7303
                gen_op_logic_T0_cc();
7304
            break;
7305
        case 0xd: /* mul */
7306
            gen_op_mull_T0_T1();
7307
            if (!s->condexec_mask)
7308
                gen_op_logic_T0_cc();
7309
            break;
7310
        case 0xe: /* bic */
7311
            gen_op_bicl_T0_T1();
7312
            if (!s->condexec_mask)
7313
                gen_op_logic_T0_cc();
7314
            break;
7315
        case 0xf: /* mvn */
7316
            gen_op_notl_T1();
7317
            if (!s->condexec_mask)
7318
                gen_op_logic_T1_cc();
7319
            val = 1;
7320
            rm = rd;
7321
            break;
7322
        }
7323
        if (rd != 16) {
7324
            if (val)
7325
                gen_movl_reg_T1(s, rm);
7326
            else
7327
                gen_movl_reg_T0(s, rd);
7328
        }
7329
        break;
7330

    
7331
    case 5:
7332
        /* load/store register offset.  */
7333
        rd = insn & 7;
7334
        rn = (insn >> 3) & 7;
7335
        rm = (insn >> 6) & 7;
7336
        op = (insn >> 9) & 7;
7337
        gen_movl_T1_reg(s, rn);
7338
        tmp = load_reg(s, rm);
7339
        tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
7340
        dead_tmp(tmp);
7341

    
7342
        if (op < 3) /* store */
7343
            gen_movl_T0_reg(s, rd);
7344

    
7345
        switch (op) {
7346
        case 0: /* str */
7347
            gen_ldst(stl, s);
7348
            break;
7349
        case 1: /* strh */
7350
            gen_ldst(stw, s);
7351
            break;
7352
        case 2: /* strb */
7353
            gen_ldst(stb, s);
7354
            break;
7355
        case 3: /* ldrsb */
7356
            gen_ldst(ldsb, s);
7357
            break;
7358
        case 4: /* ldr */
7359
            gen_ldst(ldl, s);
7360
            break;
7361
        case 5: /* ldrh */
7362
            gen_ldst(lduw, s);
7363
            break;
7364
        case 6: /* ldrb */
7365
            gen_ldst(ldub, s);
7366
            break;
7367
        case 7: /* ldrsh */
7368
            gen_ldst(ldsw, s);
7369
            break;
7370
        }
7371
        if (op >= 3) /* load */
7372
            gen_movl_reg_T0(s, rd);
7373
        break;
7374

    
7375
    case 6:
7376
        /* load/store word immediate offset */
7377
        rd = insn & 7;
7378
        rn = (insn >> 3) & 7;
7379
        gen_movl_T1_reg(s, rn);
7380
        val = (insn >> 4) & 0x7c;
7381
        tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val);
7382

    
7383
        if (insn & (1 << 11)) {
7384
            /* load */
7385
            gen_ldst(ldl, s);
7386
            gen_movl_reg_T0(s, rd);
7387
        } else {
7388
            /* store */
7389
            gen_movl_T0_reg(s, rd);
7390
            gen_ldst(stl, s);
7391
        }
7392
        break;
7393

    
7394
    case 7:
7395
        /* load/store byte immediate offset */
7396
        rd = insn & 7;
7397
        rn = (insn >> 3) & 7;
7398
        gen_movl_T1_reg(s, rn);
7399
        val = (insn >> 6) & 0x1f;
7400
        tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val);
7401

    
7402
        if (insn & (1 << 11)) {
7403
            /* load */
7404
            gen_ldst(ldub, s);
7405
            gen_movl_reg_T0(s, rd);
7406
        } else {
7407
            /* store */
7408
            gen_movl_T0_reg(s, rd);
7409
            gen_ldst(stb, s);
7410
        }
7411
        break;
7412

    
7413
    case 8:
7414
        /* load/store halfword immediate offset */
7415
        rd = insn & 7;
7416
        rn = (insn >> 3) & 7;
7417
        gen_movl_T1_reg(s, rn);
7418
        val = (insn >> 5) & 0x3e;
7419
        tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val);
7420

    
7421
        if (insn & (1 << 11)) {
7422
            /* load */
7423
            gen_ldst(lduw, s);
7424
            gen_movl_reg_T0(s, rd);
7425
        } else {
7426
            /* store */
7427
            gen_movl_T0_reg(s, rd);
7428
            gen_ldst(stw, s);
7429
        }
7430
        break;
7431

    
7432
    case 9:
7433
        /* load/store from stack */
7434
        rd = (insn >> 8) & 7;
7435
        gen_movl_T1_reg(s, 13);
7436
        val = (insn & 0xff) * 4;
7437
        tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val);
7438

    
7439
        if (insn & (1 << 11)) {
7440
            /* load */
7441
            gen_ldst(ldl, s);
7442
            gen_movl_reg_T0(s, rd);
7443
        } else {
7444
            /* store */
7445
            gen_movl_T0_reg(s, rd);
7446
            gen_ldst(stl, s);
7447
        }
7448
        break;
7449

    
7450
    case 10:
7451
        /* add to high reg */
7452
        rd = (insn >> 8) & 7;
7453
        if (insn & (1 << 11)) {
7454
            /* SP */
7455
            gen_movl_T0_reg(s, 13);
7456
        } else {
7457
            /* PC. bit 1 is ignored.  */
7458
            gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
7459
        }
7460
        val = (insn & 0xff) * 4;
7461
        gen_op_movl_T1_im(val);
7462
        gen_op_addl_T0_T1();
7463
        gen_movl_reg_T0(s, rd);
7464
        break;
7465

    
7466
    case 11:
7467
        /* misc */
7468
        op = (insn >> 8) & 0xf;
7469
        switch (op) {
7470
        case 0:
7471
            /* adjust stack pointer */
7472
            tmp = load_reg(s, 13);
7473
            val = (insn & 0x7f) * 4;
7474
            if (insn & (1 << 7))
7475
              val = -(int32_t)val;
7476
            tcg_gen_addi_i32(tmp, tmp, val);
7477
            store_reg(s, 13, tmp);
7478
            break;
7479

    
7480
        case 2: /* sign/zero extend.  */
7481
            ARCH(6);
7482
            rd = insn & 7;
7483
            rm = (insn >> 3) & 7;
7484
            gen_movl_T1_reg(s, rm);
7485
            switch ((insn >> 6) & 3) {
7486
            case 0: gen_sxth(cpu_T[1]); break;
7487
            case 1: gen_sxtb(cpu_T[1]); break;
7488
            case 2: gen_uxth(cpu_T[1]); break;
7489
            case 3: gen_uxtb(cpu_T[1]); break;
7490
            }
7491
            gen_movl_reg_T1(s, rd);
7492
            break;
7493
        case 4: case 5: case 0xc: case 0xd:
7494
            /* push/pop */
7495
            gen_movl_T1_reg(s, 13);
7496
            if (insn & (1 << 8))
7497
                offset = 4;
7498
            else
7499
                offset = 0;
7500
            for (i = 0; i < 8; i++) {
7501
                if (insn & (1 << i))
7502
                    offset += 4;
7503
            }
7504
            if ((insn & (1 << 11)) == 0) {
7505
                gen_op_addl_T1_im(-offset);
7506
            }
7507
            for (i = 0; i < 8; i++) {
7508
                if (insn & (1 << i)) {
7509
                    if (insn & (1 << 11)) {
7510
                        /* pop */
7511
                        gen_ldst(ldl, s);
7512
                        gen_movl_reg_T0(s, i);
7513
                    } else {
7514
                        /* push */
7515
                        gen_movl_T0_reg(s, i);
7516
                        gen_ldst(stl, s);
7517
                    }
7518
                    /* advance to the next address.  */
7519
                    gen_op_addl_T1_im(4);
7520
                }
7521
            }
7522
            if (insn & (1 << 8)) {
7523
                if (insn & (1 << 11)) {
7524
                    /* pop pc */
7525
                    gen_ldst(ldl, s);
7526
                    /* don't set the pc until the rest of the instruction
7527
                       has completed */
7528
                } else {
7529
                    /* push lr */
7530
                    gen_movl_T0_reg(s, 14);
7531
                    gen_ldst(stl, s);
7532
                }
7533
                gen_op_addl_T1_im(4);
7534
            }
7535
            if ((insn & (1 << 11)) == 0) {
7536
                gen_op_addl_T1_im(-offset);
7537
            }
7538
            /* write back the new stack pointer */
7539
            gen_movl_reg_T1(s, 13);
7540
            /* set the new PC value */
7541
            if ((insn & 0x0900) == 0x0900)
7542
                gen_bx(s);
7543
            break;
7544

    
7545
        case 1: case 3: case 9: case 11: /* czb */
7546
            rm = insn & 7;
7547
            gen_movl_T0_reg(s, rm);
7548
            s->condlabel = gen_new_label();
7549
            s->condjmp = 1;
7550
            if (insn & (1 << 11))
7551
                gen_op_testn_T0(s->condlabel);
7552
            else
7553
                gen_op_test_T0(s->condlabel);
7554

    
7555
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
7556
            val = (uint32_t)s->pc + 2;
7557
            val += offset;
7558
            gen_jmp(s, val);
7559
            break;
7560

    
7561
        case 15: /* IT, nop-hint.  */
7562
            if ((insn & 0xf) == 0) {
7563
                gen_nop_hint(s, (insn >> 4) & 0xf);
7564
                break;
7565
            }
7566
            /* If Then.  */
7567
            s->condexec_cond = (insn >> 4) & 0xe;
7568
            s->condexec_mask = insn & 0x1f;
7569
            /* No actual code generated for this insn, just setup state.  */
7570
            break;
7571

    
7572
        case 0xe: /* bkpt */
7573
            gen_set_condexec(s);
7574
            gen_op_movl_T0_im((long)s->pc - 2);
7575
            gen_set_pc_T0();
7576
            gen_op_bkpt();
7577
            s->is_jmp = DISAS_JUMP;
7578
            break;
7579

    
7580
        case 0xa: /* rev */
7581
            ARCH(6);
7582
            rn = (insn >> 3) & 0x7;
7583
            rd = insn & 0x7;
7584
            gen_movl_T0_reg(s, rn);
7585
            switch ((insn >> 6) & 3) {
7586
            case 0: gen_op_rev_T0(); break;
7587
            case 1: gen_rev16(cpu_T[0]); break;
7588
            case 3: gen_revsh(cpu_T[0]); break;
7589
            default: goto illegal_op;
7590
            }
7591
            gen_movl_reg_T0(s, rd);
7592
            break;
7593

    
7594
        case 6: /* cps */
7595
            ARCH(6);
7596
            if (IS_USER(s))
7597
                break;
7598
            if (IS_M(env)) {
7599
                val = (insn & (1 << 4)) != 0;
7600
                gen_op_movl_T0_im(val);
7601
                /* PRIMASK */
7602
                if (insn & 1)
7603
                    gen_op_v7m_msr_T0(16);
7604
                /* FAULTMASK */
7605
                if (insn & 2)
7606
                    gen_op_v7m_msr_T0(17);
7607

    
7608
                gen_lookup_tb(s);
7609
            } else {
7610
                if (insn & (1 << 4))
7611
                    shift = CPSR_A | CPSR_I | CPSR_F;
7612
                else
7613
                    shift = 0;
7614

    
7615
                val = ((insn & 7) << 6) & shift;
7616
                gen_op_movl_T0_im(val);
7617
                gen_set_psr_T0(s, shift, 0);
7618
            }
7619
            break;
7620

    
7621
        default:
7622
            goto undef;
7623
        }
7624
        break;
7625

    
7626
    case 12:
7627
        /* load/store multiple */
7628
        rn = (insn >> 8) & 0x7;
7629
        gen_movl_T1_reg(s, rn);
7630
        for (i = 0; i < 8; i++) {
7631
            if (insn & (1 << i)) {
7632
                if (insn & (1 << 11)) {
7633
                    /* load */
7634
                    gen_ldst(ldl, s);
7635
                    gen_movl_reg_T0(s, i);
7636
                } else {
7637
                    /* store */
7638
                    gen_movl_T0_reg(s, i);
7639
                    gen_ldst(stl, s);
7640
                }
7641
                /* advance to the next address */
7642
                gen_op_addl_T1_im(4);
7643
            }
7644
        }
7645
        /* Base register writeback.  */
7646
        if ((insn & (1 << rn)) == 0)
7647
            gen_movl_reg_T1(s, rn);
7648
        break;
7649

    
7650
    case 13:
7651
        /* conditional branch or swi */
7652
        cond = (insn >> 8) & 0xf;
7653
        if (cond == 0xe)
7654
            goto undef;
7655

    
7656
        if (cond == 0xf) {
7657
            /* swi */
7658
            gen_set_condexec(s);
7659
            gen_op_movl_T0_im((long)s->pc | 1);
7660
            /* Don't set r15.  */
7661
            gen_set_pc_T0();
7662
            s->is_jmp = DISAS_SWI;
7663
            break;
7664
        }
7665
        /* generate a conditional jump to next instruction */
7666
        s->condlabel = gen_new_label();
7667
        gen_test_cc[cond ^ 1](s->condlabel);
7668
        s->condjmp = 1;
7669
        gen_movl_T1_reg(s, 15);
7670

    
7671
        /* jump to the offset */
7672
        val = (uint32_t)s->pc + 2;
7673
        offset = ((int32_t)insn << 24) >> 24;
7674
        val += offset << 1;
7675
        gen_jmp(s, val);
7676
        break;
7677

    
7678
    case 14:
7679
        if (insn & (1 << 11)) {
7680
            if (disas_thumb2_insn(env, s, insn))
7681
              goto undef32;
7682
            break;
7683
        }
7684
        /* unconditional branch */
7685
        val = (uint32_t)s->pc;
7686
        offset = ((int32_t)insn << 21) >> 21;
7687
        val += (offset << 1) + 2;
7688
        gen_jmp(s, val);
7689
        break;
7690

    
7691
    case 15:
7692
        if (disas_thumb2_insn(env, s, insn))
7693
          goto undef32;
7694
        break;
7695
    }
7696
    return;
7697
undef32:
7698
    gen_set_condexec(s);
7699
    gen_op_movl_T0_im((long)s->pc - 4);
7700
    gen_set_pc_T0();
7701
    gen_op_undef_insn();
7702
    s->is_jmp = DISAS_JUMP;
7703
    return;
7704
illegal_op:
7705
undef:
7706
    gen_set_condexec(s);
7707
    gen_op_movl_T0_im((long)s->pc - 2);
7708
    gen_set_pc_T0();
7709
    gen_op_undef_insn();
7710
    s->is_jmp = DISAS_JUMP;
7711
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7714
   basic block 'tb'. If search_pc is TRUE, also generate PC
7715
   information for each intermediate instruction. */
7716
static inline int gen_intermediate_code_internal(CPUState *env,
7717
                                                 TranslationBlock *tb,
7718
                                                 int search_pc)
7719
{
7720
    DisasContext dc1, *dc = &dc1;
7721
    uint16_t *gen_opc_end;
7722
    int j, lj;
7723
    target_ulong pc_start;
7724
    uint32_t next_page_start;
7725

    
7726
    /* generate intermediate code */
7727
    num_temps = 0;
7728
    memset(temps, 0, sizeof(temps));
7729

    
7730
    pc_start = tb->pc;
7731

    
7732
    dc->tb = tb;
7733

    
7734
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
7735

    
7736
    dc->is_jmp = DISAS_NEXT;
7737
    dc->pc = pc_start;
7738
    dc->singlestep_enabled = env->singlestep_enabled;
7739
    dc->condjmp = 0;
7740
    dc->thumb = env->thumb;
7741
    dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
7742
    dc->condexec_cond = env->condexec_bits >> 4;
7743
    dc->is_mem = 0;
7744
#if !defined(CONFIG_USER_ONLY)
7745
    if (IS_M(env)) {
7746
        dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
7747
    } else {
7748
        dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
7749
    }
7750
#endif
7751
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
7752
    lj = -1;
7753
    /* Reset the conditional execution bits immediately. This avoids
7754
       complications trying to do it at the end of the block.  */
7755
    if (env->condexec_bits)
7756
      {
7757
        TCGv tmp = new_tmp();
7758
        tcg_gen_movi_i32(tmp, 0);
7759
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, condexec_bits));
7760
        dead_tmp(tmp);
7761
      }
7762
    do {
7763
#ifndef CONFIG_USER_ONLY
7764
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
7765
            /* We always get here via a jump, so know we are not in a
7766
               conditional execution block.  */
7767
            gen_op_exception_exit();
7768
        }
7769
#endif
7770

    
7771
        if (env->nb_breakpoints > 0) {
7772
            for(j = 0; j < env->nb_breakpoints; j++) {
7773
                if (env->breakpoints[j] == dc->pc) {
7774
                    gen_set_condexec(dc);
7775
                    gen_op_movl_T0_im((long)dc->pc);
7776
                    gen_set_pc_T0();
7777
                    gen_op_debug();
7778
                    dc->is_jmp = DISAS_JUMP;
7779
                    /* Advance PC so that clearing the breakpoint will
7780
                       invalidate this TB.  */
7781
                    dc->pc += 2;
7782
                    goto done_generating;
7783
                    break;
7784
                }
7785
            }
7786
        }
7787
        if (search_pc) {
7788
            j = gen_opc_ptr - gen_opc_buf;
7789
            if (lj < j) {
7790
                lj++;
7791
                while (lj < j)
7792
                    gen_opc_instr_start[lj++] = 0;
7793
            }
7794
            gen_opc_pc[lj] = dc->pc;
7795
            gen_opc_instr_start[lj] = 1;
7796
        }
7797

    
7798
        if (env->thumb) {
7799
            disas_thumb_insn(env, dc);
7800
            if (dc->condexec_mask) {
7801
                dc->condexec_cond = (dc->condexec_cond & 0xe)
7802
                                   | ((dc->condexec_mask >> 4) & 1);
7803
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
7804
                if (dc->condexec_mask == 0) {
7805
                    dc->condexec_cond = 0;
7806
                }
7807
            }
7808
        } else {
7809
            disas_arm_insn(env, dc);
7810
        }
7811
        if (num_temps) {
7812
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
7813
            num_temps = 0;
7814
        }
7815

    
7816
        if (dc->condjmp && !dc->is_jmp) {
7817
            gen_set_label(dc->condlabel);
7818
            dc->condjmp = 0;
7819
        }
7820
        /* Terminate the TB on memory ops if watchpoints are present.  */
7821
        /* FIXME: This should be replacd by the deterministic execution
7822
         * IRQ raising bits.  */
7823
        if (dc->is_mem && env->nb_watchpoints)
7824
            break;
7825

    
7826
        /* Translation stops when a conditional branch is enoutered.
7827
         * Otherwise the subsequent code could get translated several times.
7828
         * Also stop translation when a page boundary is reached.  This
7829
         * ensures prefech aborts occur at the right place.  */
7830
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
7831
             !env->singlestep_enabled &&
7832
             dc->pc < next_page_start);
7833

    
7834
    /* At this stage dc->condjmp will only be set when the skipped
7835
       instruction was a conditional branch or trap, and the PC has
7836
       already been written.  */
7837
    if (__builtin_expect(env->singlestep_enabled, 0)) {
7838
        /* Make sure the pc is updated, and raise a debug exception.  */
7839
        if (dc->condjmp) {
7840
            gen_set_condexec(dc);
7841
            if (dc->is_jmp == DISAS_SWI) {
7842
                gen_op_swi();
7843
            } else {
7844
                gen_op_debug();
7845
            }
7846
            gen_set_label(dc->condlabel);
7847
        }
7848
        if (dc->condjmp || !dc->is_jmp) {
7849
            gen_op_movl_T0_im((long)dc->pc);
7850
            gen_set_pc_T0();
7851
            dc->condjmp = 0;
7852
        }
7853
        gen_set_condexec(dc);
7854
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
7855
            gen_op_swi();
7856
        } else {
7857
            /* FIXME: Single stepping a WFI insn will not halt
7858
               the CPU.  */
7859
            gen_op_debug();
7860
        }
7861
    } else {
7862
        /* While branches must always occur at the end of an IT block,
7863
           there are a few other things that can cause us to terminate
7864
           the TB in the middel of an IT block:
7865
            - Exception generating instructions (bkpt, swi, undefined).
7866
            - Page boundaries.
7867
            - Hardware watchpoints.
7868
           Hardware breakpoints have already been handled and skip this code.
7869
         */
7870
        gen_set_condexec(dc);
7871
        switch(dc->is_jmp) {
7872
        case DISAS_NEXT:
7873
            gen_goto_tb(dc, 1, dc->pc);
7874
            break;
7875
        default:
7876
        case DISAS_JUMP:
7877
        case DISAS_UPDATE:
7878
            /* indicate that the hash table must be used to find the next TB */
7879
            tcg_gen_exit_tb(0);
7880
            break;
7881
        case DISAS_TB_JUMP:
7882
            /* nothing more to generate */
7883
            break;
7884
        case DISAS_WFI:
7885
            gen_op_wfi();
7886
            break;
7887
        case DISAS_SWI:
7888
            gen_op_swi();
7889
            break;
7890
        }
7891
        if (dc->condjmp) {
7892
            gen_set_label(dc->condlabel);
7893
            gen_set_condexec(dc);
7894
            gen_goto_tb(dc, 1, dc->pc);
7895
            dc->condjmp = 0;
7896
        }
7897
    }
7898
done_generating:
7899
    *gen_opc_ptr = INDEX_op_end;
7900

    
7901
#ifdef DEBUG_DISAS
7902
    if (loglevel & CPU_LOG_TB_IN_ASM) {
7903
        fprintf(logfile, "----------------\n");
7904
        fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
7905
        target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
7906
        fprintf(logfile, "\n");
7907
    }
7908
#endif
7909
    if (search_pc) {
7910
        j = gen_opc_ptr - gen_opc_buf;
7911
        lj++;
7912
        while (lj <= j)
7913
            gen_opc_instr_start[lj++] = 0;
7914
    } else {
7915
        tb->size = dc->pc - pc_start;
7916
    }
7917
    return 0;
7918
}
/* Translate the basic block 'tb' without per-op PC tracking.
   Thin wrapper around gen_intermediate_code_internal().  */
int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    return gen_intermediate_code_internal(env, tb, 0);
}
/* Translate the basic block 'tb' and also record guest PC information
   for each intermediate op (used to restore state on exceptions).
   Thin wrapper around gen_intermediate_code_internal().  */
int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    return gen_intermediate_code_internal(env, tb, 1);
}
/* Human-readable names for the ARM CPU mode field (PSR bits [3:0] with
   bit 4 set); "???" marks encodings with no architected mode.  */
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
/* Dump the ARM CPU state (core registers, PSR flags, and VFP registers)
   to 'f' via 'cpu_fprintf', for debugging.  'flags' is currently unused.  */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int i;
    /* Bitwise view of a single-precision VFP register for printing both
       the raw pattern and the float value.  */
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
    uint32_t psr;

    /* Core registers, four per line.  */
    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    /* N/Z/C/V condition flags, Thumb/ARM state, mode name, and whether the
       mode encoding selects a 32-bit (bit 4 set) or 26-bit mode.  */
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

    /* Each double register overlays two single registers; print both
       views plus the double-precision value.  */
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
}