1
/*
2
 *  ARM translation
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *  Copyright (c) 2005-2007 CodeSourcery
6
 *  Copyright (c) 2007 OpenedHand, Ltd.
7
 *
8
 * This library is free software; you can redistribute it and/or
9
 * modify it under the terms of the GNU Lesser General Public
10
 * License as published by the Free Software Foundation; either
11
 * version 2 of the License, or (at your option) any later version.
12
 *
13
 * This library is distributed in the hope that it will be useful,
14
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16
 * Lesser General Public License for more details.
17
 *
18
 * You should have received a copy of the GNU Lesser General Public
19
 * License along with this library; if not, write to the Free Software
20
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21
 */
22
#include <stdarg.h>
23
#include <stdlib.h>
24
#include <stdio.h>
25
#include <string.h>
26
#include <inttypes.h>
27

    
28
#include "cpu.h"
29
#include "exec-all.h"
30
#include "disas.h"
31
#include "tcg-op.h"
32
#include "helpers.h"
33

    
34
#define ENABLE_ARCH_5J    0
35
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
36
#define ENABLE_ARCH_6K   arm_feature(env, ARM_FEATURE_V6K)
37
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
38
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)
39

    
40
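/* Bail out to the illegal_op handler if the guest CPU lacks the
   required architecture feature.  */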
#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
41

    
42
/* internal defines */
43
typedef struct DisasContext {
44
    target_ulong pc;
45
    int is_jmp;
46
    /* Nonzero if this instruction has been conditionally skipped.  */
47
    int condjmp;
48
    /* The label that will be jumped to when the instruction is skipped.  */
49
    int condlabel;
50
    /* Thumb-2 conditional execution bits.  */
51
    int condexec_mask;
52
    int condexec_cond;
53
    struct TranslationBlock *tb;
54
    int singlestep_enabled;
55
    int thumb;
56
    int is_mem;
57
#if !defined(CONFIG_USER_ONLY)
58
    int user;
59
#endif
60
} DisasContext;
61

    
62
#if defined(CONFIG_USER_ONLY)
63
#define IS_USER(s) 1
64
#else
65
#define IS_USER(s) (s->user)
66
#endif
67

    
68
/* These instructions trap after executing, so defer them until after the
69
   conditional execution state has been updated.  */
70
#define DISAS_WFI 4
71
#define DISAS_SWI 5
72

    
73
/* XXX: move that elsewhere */
74
extern FILE *logfile;
75
extern int loglevel;
76

    
77
static TCGv cpu_env;
78
/* FIXME:  These should be removed.  */
79
static TCGv cpu_T[3];
80

    
81
/* initialize TCG globals.  */
82
void arm_translate_init(void)
83
{
84
    cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
85

    
86
    cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
87
    cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
88
    cpu_T[2] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG3, "T2");
89
}
90

    
91
/* The code generator doesn't like lots of temporaries, so maintain our own
92
   cache for reuse within a function.  */
93
#define MAX_TEMPS 8
94
static int num_temps;
95
static TCGv temps[MAX_TEMPS];
96

    
97
/* Allocate a temporary variable.  */
98
static TCGv new_tmp(void)
99
{
100
    TCGv tmp;
101
    if (num_temps == MAX_TEMPS)
102
        abort();
103

    
104
    if (GET_TCGV(temps[num_temps]))
105
      return temps[num_temps++];
106

    
107
    tmp = tcg_temp_new(TCG_TYPE_I32);
108
    temps[num_temps++] = tmp;
109
    return tmp;
110
}
111

    
112
/* Release a temporary variable.  */
113
static void dead_tmp(TCGv tmp)
114
{
115
    int i;
116
    num_temps--;
117
    i = num_temps;
118
    if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
119
        return;
120

    
121
    /* Shuffle this temp to the last slot.  */
122
    while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
123
        i--;
124
    while (i < num_temps) {
125
        temps[i] = temps[i + 1];
126
        i++;
127
    }
128
    temps[i] = tmp;
129
}
130

    
131
/* Set a variable to the value of a CPU register.  */
132
static void load_reg_var(DisasContext *s, TCGv var, int reg)
133
{
134
    if (reg == 15) {
135
        uint32_t addr;
136
        /* Normally, since we have already updated the PC, we only need to
           add the length of one instruction.  */
137
        if (s->thumb)
138
            addr = (long)s->pc + 2;
139
        else
140
            addr = (long)s->pc + 4;
141
        tcg_gen_movi_i32(var, addr);
142
    } else {
143
        tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
144
    }
145
}
146

    
147
/* Create a new temporary and set it to the value of a CPU register.  */
148
static inline TCGv load_reg(DisasContext *s, int reg)
149
{
150
    TCGv tmp = new_tmp();
151
    load_reg_var(s, tmp, reg);
152
    return tmp;
153
}
154

    
155
/* Set a CPU register.  The source must be a temporary and will be
156
   marked as dead.  */
157
static void store_reg(DisasContext *s, int reg, TCGv var)
158
{
159
    if (reg == 15) {
160
        tcg_gen_andi_i32(var, var, ~1);
161
        s->is_jmp = DISAS_JUMP;
162
    }
163
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
164
    dead_tmp(var);
165
}
166

    
167

    
168
/* Basic operations.  */
169
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
170
#define gen_op_movl_T0_T2() tcg_gen_mov_i32(cpu_T[0], cpu_T[2])
171
#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
172
#define gen_op_movl_T1_T2() tcg_gen_mov_i32(cpu_T[1], cpu_T[2])
173
#define gen_op_movl_T2_T0() tcg_gen_mov_i32(cpu_T[2], cpu_T[0])
174
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
175
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
176
#define gen_op_movl_T2_im(im) tcg_gen_movi_i32(cpu_T[2], im)
177

    
178
#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
179
#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
180
#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
181
#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
182

    
183
#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
184
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
185
#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
186
#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
187
#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
188
#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
189
#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
190

    
191
#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)
192
#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
193
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
194
#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)
195
#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)
196

    
197
/* Value extensions.  */
198
#define gen_uxtb(var) tcg_gen_andi_i32(var, var, 0xff)
199
#define gen_uxth(var) tcg_gen_andi_i32(var, var, 0xffff)
200
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
201
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
202

    
203
#define HELPER_ADDR(x) helper_##x
204

    
205
#define gen_sxtb16(var) tcg_gen_helper_1_1(HELPER_ADDR(sxtb16), var, var)
206
#define gen_uxtb16(var) tcg_gen_helper_1_1(HELPER_ADDR(uxtb16), var, var)
207

    
208
#define gen_op_clz_T0(var) \
209
    tcg_gen_helper_1_1(HELPER_ADDR(clz), cpu_T[0], cpu_T[0])
210

    
211
/* Dual 16-bit add.  The result is placed in t0; t1 is marked as dead.
212
    tmp = (t0 ^ t1) & 0x8000;
213
    t0 &= ~0x8000;
214
    t1 &= ~0x8000;
215
    t0 = (t0 + t1) ^ tmp;
216
 */
217

    
218
static void gen_add16(TCGv t0, TCGv t1)
219
{
220
    TCGv tmp = new_tmp();
221
    tcg_gen_xor_i32(tmp, t0, t1);
222
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
223
    tcg_gen_andi_i32(t0, t0, ~0x8000);
224
    tcg_gen_andi_i32(t1, t1, ~0x8000);
225
    tcg_gen_add_i32(t0, t0, t1);
226
    tcg_gen_xor_i32(t0, t0, tmp);
227
    dead_tmp(tmp);
228
    dead_tmp(t1);
229
}
230

    
231
/* Set CF to the top bit of var.  */
232
static void gen_set_CF_bit31(TCGv var)
233
{
234
    TCGv tmp = new_tmp();
235
    tcg_gen_shri_i32(tmp, var, 31);
236
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, CF));
237
    dead_tmp(tmp);
238
}
239

    
240
/* Set N and Z flags from var.  */
241
static inline void gen_logic_CC(TCGv var)
242
{
243
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NZF));
244
}
245

    
246
/* T0 += T1 + CF.  */
247
static void gen_adc_T0_T1(void)
248
{
249
    TCGv tmp = new_tmp();
250
    gen_op_addl_T0_T1();
251
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUState, CF));
252
    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
253
    dead_tmp(tmp);
254
}
255

    
256
/* FIXME:  Implement this natively.  */
257
static inline void tcg_gen_not_i32(TCGv t0, TCGv t1)
258
{
259
    tcg_gen_xori_i32(t0, t1, ~0);
260
}
261

    
262
/* T0 &= ~T1.  Clobbers T1.  */
263
/* FIXME: Implement bic natively.  */
264
static inline void gen_op_bicl_T0_T1(void)
265
{
266
    gen_op_notl_T1();
267
    gen_op_andl_T0_T1();
268
}
269

    
270
/* FIXME:  Implement this natively.  */
271
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
272
{
273
    TCGv tmp;
274

    
275
    if (i == 0)
276
        return;
277

    
278
    tmp = new_tmp();
279
    tcg_gen_shri_i32(tmp, t1, i);
280
    tcg_gen_shli_i32(t1, t1, 32 - i);
281
    tcg_gen_or_i32(t0, t1, tmp);
282
    dead_tmp(tmp);
283
}
284

    
285
/* Shift by immediate.  Includes special handling for shift == 0.  */
286
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift)
287
{
288
    if (shift != 0) {
289
        switch (shiftop) {
290
        case 0: tcg_gen_shli_i32(var, var, shift); break;
291
        case 1: tcg_gen_shri_i32(var, var, shift); break;
292
        case 2: tcg_gen_sari_i32(var, var, shift); break;
293
        case 3: tcg_gen_rori_i32(var, var, shift); break;
294
        }
295
    } else {
296
        TCGv tmp;
297

    
298
        switch (shiftop) {
299
        case 0: break;
300
        case 1: tcg_gen_movi_i32(var, 0); break;
301
        case 2: tcg_gen_sari_i32(var, var, 31); break;
302
        case 3: /* rrx */
303
            tcg_gen_shri_i32(var, var, 1);
304
            tmp = new_tmp();
305
            tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUState, CF));
306
            tcg_gen_shli_i32(tmp, tmp, 31);
307
            tcg_gen_or_i32(var, var, tmp);
308
            dead_tmp(tmp);
309
            break;
310
        }
311
    }
312
}
313

    
314
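/* Tables of parallel add/subtract generators: the outer index selects the
   signed/saturating/halving/unsigned variant, the inner index the
   individual operation.  */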
#define PAS_OP(pfx) {  \
315
    gen_op_ ## pfx ## add16_T0_T1, \
316
    gen_op_ ## pfx ## addsubx_T0_T1, \
317
    gen_op_ ## pfx ## subaddx_T0_T1, \
318
    gen_op_ ## pfx ## sub16_T0_T1, \
319
    gen_op_ ## pfx ## add8_T0_T1, \
320
    NULL, \
321
    NULL, \
322
    gen_op_ ## pfx ## sub8_T0_T1 }
323

    
324
static GenOpFunc *gen_arm_parallel_addsub[8][8] = {
325
    {},
326
    PAS_OP(s),
327
    PAS_OP(q),
328
    PAS_OP(sh),
329
    {},
330
    PAS_OP(u),
331
    PAS_OP(uq),
332
    PAS_OP(uh),
333
};
334
#undef PAS_OP
335

    
336
/* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings.  */
337
#define PAS_OP(pfx) {  \
338
    gen_op_ ## pfx ## add8_T0_T1, \
339
    gen_op_ ## pfx ## add16_T0_T1, \
340
    gen_op_ ## pfx ## addsubx_T0_T1, \
341
    NULL, \
342
    gen_op_ ## pfx ## sub8_T0_T1, \
343
    gen_op_ ## pfx ## sub16_T0_T1, \
344
    gen_op_ ## pfx ## subaddx_T0_T1, \
345
    NULL }
346

    
347
static GenOpFunc *gen_thumb2_parallel_addsub[8][8] = {
348
    PAS_OP(s),
349
    PAS_OP(q),
350
    PAS_OP(sh),
351
    {},
352
    PAS_OP(u),
353
    PAS_OP(uq),
354
    PAS_OP(uh),
355
    {}
356
};
357
#undef PAS_OP
358

    
359
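/* Condition-test generators, one for each of the 14 testable ARM
   condition codes (AL and NV excluded).  */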
static GenOpFunc1 *gen_test_cc[14] = {
360
    gen_op_test_eq,
361
    gen_op_test_ne,
362
    gen_op_test_cs,
363
    gen_op_test_cc,
364
    gen_op_test_mi,
365
    gen_op_test_pl,
366
    gen_op_test_vs,
367
    gen_op_test_vc,
368
    gen_op_test_hi,
369
    gen_op_test_ls,
370
    gen_op_test_ge,
371
    gen_op_test_lt,
372
    gen_op_test_gt,
373
    gen_op_test_le,
374
};
375

    
376
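/* Nonzero for the data-processing opcodes that are logic operations,
   i.e. those whose flag-setting forms set N and Z from the result,
   with the carry flag coming from the shifter.  */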
const uint8_t table_logic_cc[16] = {
377
    1, /* and */
378
    1, /* xor */
379
    0, /* sub */
380
    0, /* rsb */
381
    0, /* add */
382
    0, /* adc */
383
    0, /* sbc */
384
    0, /* rsc */
385
    1, /* andl */
386
    1, /* xorl */
387
    0, /* cmp */
388
    0, /* cmn */
389
    1, /* orr */
390
    1, /* mov */
391
    1, /* bic */
392
    1, /* mvn */
393
};
394

    
395
static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
396
    gen_op_shll_T1_im_cc,
397
    gen_op_shrl_T1_im_cc,
398
    gen_op_sarl_T1_im_cc,
399
    gen_op_rorl_T1_im_cc,
400
};
401

    
402
static GenOpFunc *gen_shift_T1_0_cc[4] = {
403
    NULL,
404
    gen_op_shrl_T1_0_cc,
405
    gen_op_sarl_T1_0_cc,
406
    gen_op_rrxl_T1_cc,
407
};
408

    
409
static GenOpFunc *gen_shift_T1_T0[4] = {
410
    gen_op_shll_T1_T0,
411
    gen_op_shrl_T1_T0,
412
    gen_op_sarl_T1_T0,
413
    gen_op_rorl_T1_T0,
414
};
415

    
416
static GenOpFunc *gen_shift_T1_T0_cc[4] = {
417
    gen_op_shll_T1_T0_cc,
418
    gen_op_shrl_T1_T0_cc,
419
    gen_op_sarl_T1_T0_cc,
420
    gen_op_rorl_T1_T0_cc,
421
};
422

    
423
static GenOpFunc1 *gen_shift_T0_im_thumb_cc[3] = {
424
    gen_op_shll_T0_im_thumb_cc,
425
    gen_op_shrl_T0_im_thumb_cc,
426
    gen_op_sarl_T0_im_thumb_cc,
427
};
428

    
429
static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
430
    gen_op_shll_T0_im_thumb,
431
    gen_op_shrl_T0_im_thumb,
432
    gen_op_sarl_T0_im_thumb,
433
};
434

    
435
/* Set PC and thumb state from T0.  Clobbers T0.  */
436
static inline void gen_bx(DisasContext *s)
437
{
438
    TCGv tmp;
439

    
440
    s->is_jmp = DISAS_UPDATE;
441
    tmp = new_tmp();
442
    tcg_gen_andi_i32(tmp, cpu_T[0], 1);
443
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
444
    dead_tmp(tmp);
445
    tcg_gen_andi_i32(cpu_T[0], cpu_T[0], ~1);
446
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, regs[15]));
447
}
448

    
449
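/* Generate a load or store op, selecting the user or kernel variant
   according to the current privilege mode, and flag the instruction
   as a memory access.  */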
#if defined(CONFIG_USER_ONLY)
450
#define gen_ldst(name, s) gen_op_##name##_raw()
451
#else
452
#define gen_ldst(name, s) do { \
453
    s->is_mem = 1; \
454
    if (IS_USER(s)) \
455
        gen_op_##name##_user(); \
456
    else \
457
        gen_op_##name##_kernel(); \
458
    } while (0)
459
#endif
460

    
461
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
462
{
463
    load_reg_var(s, cpu_T[0], reg);
464
}
465

    
466
static inline void gen_movl_T1_reg(DisasContext *s, int reg)
467
{
468
    load_reg_var(s, cpu_T[1], reg);
469
}
470

    
471
static inline void gen_movl_T2_reg(DisasContext *s, int reg)
472
{
473
    load_reg_var(s, cpu_T[2], reg);
474
}
475

    
476
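/* Store T0 into the PC (r15).  */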
static inline void gen_set_pc_T0(void)
477
{
478
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, regs[15]));
479
}
480

    
481
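/* Copy T0/T1/T2 (selected by t) into CPU register reg.  Writes to r15
   clear bit 0 and end the TB with a jump.  */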
static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
482
{
483
    TCGv tmp;
484
    if (reg == 15) {
485
        tmp = new_tmp();
486
        tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
487
    } else {
488
        tmp = cpu_T[t];
489
    }
490
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
491
    if (reg == 15) {
492
        dead_tmp(tmp);
493
        s->is_jmp = DISAS_JUMP;
494
    }
495
}
496

    
497
static inline void gen_movl_reg_T0(DisasContext *s, int reg)
498
{
499
    gen_movl_reg_TN(s, reg, 0);
500
}
501

    
502
static inline void gen_movl_reg_T1(DisasContext *s, int reg)
503
{
504
    gen_movl_reg_TN(s, reg, 1);
505
}
506

    
507
/* Force a TB lookup after an instruction that changes the CPU state.  */
508
static inline void gen_lookup_tb(DisasContext *s)
509
{
510
    gen_op_movl_T0_im(s->pc);
511
    gen_movl_reg_T0(s, 15);
512
    s->is_jmp = DISAS_UPDATE;
513
}
514

    
515
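/* Add or subtract the addressing-mode offset of an ARM word/byte
   load/store (12-bit immediate or shifted register) to the address
   held in T1.  */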
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
516
{
517
    int val, rm, shift, shiftop;
518
    TCGv offset;
519

    
520
    if (!(insn & (1 << 25))) {
521
        /* immediate */
522
        val = insn & 0xfff;
523
        if (!(insn & (1 << 23)))
524
            val = -val;
525
        if (val != 0)
526
            gen_op_addl_T1_im(val);
527
    } else {
528
        /* shift/register */
529
        rm = (insn) & 0xf;
530
        shift = (insn >> 7) & 0x1f;
531
        shiftop = (insn >> 5) & 3;
532
        offset = load_reg(s, rm);
533
        gen_arm_shift_im(offset, shiftop, shift);
534
        if (!(insn & (1 << 23)))
535
            tcg_gen_sub_i32(cpu_T[1], cpu_T[1], offset);
536
        else
537
            tcg_gen_add_i32(cpu_T[1], cpu_T[1], offset);
538
        dead_tmp(offset);
539
    }
540
}
541

    
542
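/* Like gen_add_data_offset, but for the halfword/doubleword addressing
   modes (split 8-bit immediate or plain register offset); 'extra' is an
   additional displacement folded into the address.  */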
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
543
                                        int extra)
544
{
545
    int val, rm;
546
    TCGv offset;
547

    
548
    if (insn & (1 << 22)) {
549
        /* immediate */
550
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
551
        if (!(insn & (1 << 23)))
552
            val = -val;
553
        val += extra;
554
        if (val != 0)
555
            gen_op_addl_T1_im(val);
556
    } else {
557
        /* register */
558
        if (extra)
559
            gen_op_addl_T1_im(extra);
560
        rm = (insn) & 0xf;
561
        offset = load_reg(s, rm);
562
        if (!(insn & (1 << 23)))
563
            tcg_gen_sub_i32(cpu_T[1], cpu_T[1], offset);
564
        else
565
            tcg_gen_add_i32(cpu_T[1], cpu_T[1], offset);
566
        dead_tmp(offset);
567
    }
568
}
569

    
570
#define VFP_OP(name)                      \
571
static inline void gen_vfp_##name(int dp) \
572
{                                         \
573
    if (dp)                               \
574
        gen_op_vfp_##name##d();           \
575
    else                                  \
576
        gen_op_vfp_##name##s();           \
577
}
578

    
579
#define VFP_OP1(name)                               \
580
static inline void gen_vfp_##name(int dp, int arg)  \
581
{                                                   \
582
    if (dp)                                         \
583
        gen_op_vfp_##name##d(arg);                  \
584
    else                                            \
585
        gen_op_vfp_##name##s(arg);                  \
586
}
587

    
588
VFP_OP(add)
589
VFP_OP(sub)
590
VFP_OP(mul)
591
VFP_OP(div)
592
VFP_OP(neg)
593
VFP_OP(abs)
594
VFP_OP(sqrt)
595
VFP_OP(cmp)
596
VFP_OP(cmpe)
597
VFP_OP(F1_ld0)
598
VFP_OP(uito)
599
VFP_OP(sito)
600
VFP_OP(toui)
601
VFP_OP(touiz)
602
VFP_OP(tosi)
603
VFP_OP(tosiz)
604
VFP_OP1(tosh)
605
VFP_OP1(tosl)
606
VFP_OP1(touh)
607
VFP_OP1(toul)
608
VFP_OP1(shto)
609
VFP_OP1(slto)
610
VFP_OP1(uhto)
611
VFP_OP1(ulto)
612

    
613
#undef VFP_OP
614

    
615
static inline void gen_vfp_fconst(int dp, uint32_t val)
616
{
617
    if (dp)
618
        gen_op_vfp_fconstd(val);
619
    else
620
        gen_op_vfp_fconsts(val);
621
}
622

    
623
static inline void gen_vfp_ld(DisasContext *s, int dp)
624
{
625
    if (dp)
626
        gen_ldst(vfp_ldd, s);
627
    else
628
        gen_ldst(vfp_lds, s);
629
}
630

    
631
static inline void gen_vfp_st(DisasContext *s, int dp)
632
{
633
    if (dp)
634
        gen_ldst(vfp_std, s);
635
    else
636
        gen_ldst(vfp_sts, s);
637
}
638

    
639
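/* Return the byte offset of a VFP register within CPUARMState.
   Single-precision registers are stored as the two halves of the
   double-precision registers.  */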
static inline long
640
vfp_reg_offset (int dp, int reg)
641
{
642
    if (dp)
643
        return offsetof(CPUARMState, vfp.regs[reg]);
644
    else if (reg & 1) {
645
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
646
          + offsetof(CPU_DoubleU, l.upper);
647
    } else {
648
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
649
          + offsetof(CPU_DoubleU, l.lower);
650
    }
651
}
652

    
653
/* Return the offset of a 32-bit piece of a NEON register.
654
   Zero is the least significant end of the register.  */
655
static inline long
656
neon_reg_offset (int reg, int n)
657
{
658
    int sreg;
659
    sreg = reg * 2 + n;
660
    return vfp_reg_offset(0, sreg);
661
}
662

    
663
#define NEON_GET_REG(T, reg, n) gen_op_neon_getreg_##T(neon_reg_offset(reg, n))
664
#define NEON_SET_REG(T, reg, n) gen_op_neon_setreg_##T(neon_reg_offset(reg, n))
665

    
666
static inline void gen_mov_F0_vreg(int dp, int reg)
667
{
668
    if (dp)
669
        gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
670
    else
671
        gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
672
}
673

    
674
static inline void gen_mov_F1_vreg(int dp, int reg)
675
{
676
    if (dp)
677
        gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
678
    else
679
        gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
680
}
681

    
682
static inline void gen_mov_vreg_F0(int dp, int reg)
683
{
684
    if (dp)
685
        gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
686
    else
687
        gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
688
}
689

    
690
#define ARM_CP_RW_BIT        (1 << 20)
691

    
692
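/* Compute the transfer address for an iwMMXt load/store into T1,
   handling pre/post indexing and base-register writeback.  Returns
   nonzero for an invalid addressing mode.  */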
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
693
{
694
    int rd;
695
    uint32_t offset;
696

    
697
    rd = (insn >> 16) & 0xf;
698
    gen_movl_T1_reg(s, rd);
699

    
700
    offset = (insn & 0xff) << ((insn >> 7) & 2);
701
    if (insn & (1 << 24)) {
702
        /* Pre indexed */
703
        if (insn & (1 << 23))
704
            gen_op_addl_T1_im(offset);
705
        else
706
            gen_op_addl_T1_im(-offset);
707

    
708
        if (insn & (1 << 21))
709
            gen_movl_reg_T1(s, rd);
710
    } else if (insn & (1 << 21)) {
711
        /* Post indexed */
712
        if (insn & (1 << 23))
713
            gen_op_movl_T0_im(offset);
714
        else
715
            gen_op_movl_T0_im(- offset);
716
        gen_op_addl_T0_T1();
717
        gen_movl_reg_T0(s, rd);
718
    } else if (!(insn & (1 << 23)))
719
        return 1;
720
    return 0;
721
}
722

    
723
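/* Fetch an iwMMXt shift amount into T0, either from a wCGRn control
   register or from a wRn data register, and mask it to the valid range.
   Returns nonzero if the register is not a valid shift-amount source.  */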
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
724
{
725
    int rd = (insn >> 0) & 0xf;
726

    
727
    if (insn & (1 << 8))
728
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
729
            return 1;
730
        else
731
            gen_op_iwmmxt_movl_T0_wCx(rd);
732
    else
733
        gen_op_iwmmxt_movl_T0_T1_wRn(rd);
734

    
735
    gen_op_movl_T1_im(mask);
736
    gen_op_andl_T0_T1();
737
    return 0;
738
}
739

    
740
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
741
   (i.e. an undefined instruction).  */
742
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
743
{
744
    int rd, wrd;
745
    int rdhi, rdlo, rd0, rd1, i;
746

    
747
    if ((insn & 0x0e000e00) == 0x0c000000) {
748
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
749
            wrd = insn & 0xf;
750
            rdlo = (insn >> 12) & 0xf;
751
            rdhi = (insn >> 16) & 0xf;
752
            if (insn & ARM_CP_RW_BIT) {                        /* TMRRC */
753
                gen_op_iwmmxt_movl_T0_T1_wRn(wrd);
754
                gen_movl_reg_T0(s, rdlo);
755
                gen_movl_reg_T1(s, rdhi);
756
            } else {                                        /* TMCRR */
757
                gen_movl_T0_reg(s, rdlo);
758
                gen_movl_T1_reg(s, rdhi);
759
                gen_op_iwmmxt_movl_wRn_T0_T1(wrd);
760
                gen_op_iwmmxt_set_mup();
761
            }
762
            return 0;
763
        }
764

    
765
        wrd = (insn >> 12) & 0xf;
766
        if (gen_iwmmxt_address(s, insn))
767
            return 1;
768
        if (insn & ARM_CP_RW_BIT) {
769
            if ((insn >> 28) == 0xf) {                        /* WLDRW wCx */
770
                gen_ldst(ldl, s);
771
                gen_op_iwmmxt_movl_wCx_T0(wrd);
772
            } else {
773
                if (insn & (1 << 8))
774
                    if (insn & (1 << 22))                /* WLDRD */
775
                        gen_ldst(iwmmxt_ldq, s);
776
                    else                                /* WLDRW wRd */
777
                        gen_ldst(iwmmxt_ldl, s);
778
                else
779
                    if (insn & (1 << 22))                /* WLDRH */
780
                        gen_ldst(iwmmxt_ldw, s);
781
                    else                                /* WLDRB */
782
                        gen_ldst(iwmmxt_ldb, s);
783
                gen_op_iwmmxt_movq_wRn_M0(wrd);
784
            }
785
        } else {
786
            if ((insn >> 28) == 0xf) {                        /* WSTRW wCx */
787
                gen_op_iwmmxt_movl_T0_wCx(wrd);
788
                gen_ldst(stl, s);
789
            } else {
790
                gen_op_iwmmxt_movq_M0_wRn(wrd);
791
                if (insn & (1 << 8))
792
                    if (insn & (1 << 22))                /* WSTRD */
793
                        gen_ldst(iwmmxt_stq, s);
794
                    else                                /* WSTRW wRd */
795
                        gen_ldst(iwmmxt_stl, s);
796
                else
797
                    if (insn & (1 << 22))                /* WSTRH */
798
                        gen_ldst(iwmmxt_ldw, s);
799
                    else                                /* WSTRB */
800
                        gen_ldst(iwmmxt_stb, s);
801
            }
802
        }
803
        return 0;
804
    }
805

    
806
    if ((insn & 0x0f000000) != 0x0e000000)
807
        return 1;
808

    
809
    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
810
    case 0x000:                                                /* WOR */
811
        wrd = (insn >> 12) & 0xf;
812
        rd0 = (insn >> 0) & 0xf;
813
        rd1 = (insn >> 16) & 0xf;
814
        gen_op_iwmmxt_movq_M0_wRn(rd0);
815
        gen_op_iwmmxt_orq_M0_wRn(rd1);
816
        gen_op_iwmmxt_setpsr_nz();
817
        gen_op_iwmmxt_movq_wRn_M0(wrd);
818
        gen_op_iwmmxt_set_mup();
819
        gen_op_iwmmxt_set_cup();
820
        break;
821
    case 0x011:                                                /* TMCR */
822
        if (insn & 0xf)
823
            return 1;
824
        rd = (insn >> 12) & 0xf;
825
        wrd = (insn >> 16) & 0xf;
826
        switch (wrd) {
827
        case ARM_IWMMXT_wCID:
828
        case ARM_IWMMXT_wCASF:
829
            break;
830
        case ARM_IWMMXT_wCon:
831
            gen_op_iwmmxt_set_cup();
832
            /* Fall through.  */
833
        case ARM_IWMMXT_wCSSF:
834
            gen_op_iwmmxt_movl_T0_wCx(wrd);
835
            gen_movl_T1_reg(s, rd);
836
            gen_op_bicl_T0_T1();
837
            gen_op_iwmmxt_movl_wCx_T0(wrd);
838
            break;
839
        case ARM_IWMMXT_wCGR0:
840
        case ARM_IWMMXT_wCGR1:
841
        case ARM_IWMMXT_wCGR2:
842
        case ARM_IWMMXT_wCGR3:
843
            gen_op_iwmmxt_set_cup();
844
            gen_movl_T0_reg(s, rd);
845
            gen_op_iwmmxt_movl_wCx_T0(wrd);
846
            break;
847
        default:
848
            return 1;
849
        }
850
        break;
851
    case 0x100:                                                /* WXOR */
852
        wrd = (insn >> 12) & 0xf;
853
        rd0 = (insn >> 0) & 0xf;
854
        rd1 = (insn >> 16) & 0xf;
855
        gen_op_iwmmxt_movq_M0_wRn(rd0);
856
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
857
        gen_op_iwmmxt_setpsr_nz();
858
        gen_op_iwmmxt_movq_wRn_M0(wrd);
859
        gen_op_iwmmxt_set_mup();
860
        gen_op_iwmmxt_set_cup();
861
        break;
862
    case 0x111:                                                /* TMRC */
863
        if (insn & 0xf)
864
            return 1;
865
        rd = (insn >> 12) & 0xf;
866
        wrd = (insn >> 16) & 0xf;
867
        gen_op_iwmmxt_movl_T0_wCx(wrd);
868
        gen_movl_reg_T0(s, rd);
869
        break;
870
    case 0x300:                                                /* WANDN */
871
        wrd = (insn >> 12) & 0xf;
872
        rd0 = (insn >> 0) & 0xf;
873
        rd1 = (insn >> 16) & 0xf;
874
        gen_op_iwmmxt_movq_M0_wRn(rd0);
875
        gen_op_iwmmxt_negq_M0();
876
        gen_op_iwmmxt_andq_M0_wRn(rd1);
877
        gen_op_iwmmxt_setpsr_nz();
878
        gen_op_iwmmxt_movq_wRn_M0(wrd);
879
        gen_op_iwmmxt_set_mup();
880
        gen_op_iwmmxt_set_cup();
881
        break;
882
    case 0x200:                                                /* WAND */
883
        wrd = (insn >> 12) & 0xf;
884
        rd0 = (insn >> 0) & 0xf;
885
        rd1 = (insn >> 16) & 0xf;
886
        gen_op_iwmmxt_movq_M0_wRn(rd0);
887
        gen_op_iwmmxt_andq_M0_wRn(rd1);
888
        gen_op_iwmmxt_setpsr_nz();
889
        gen_op_iwmmxt_movq_wRn_M0(wrd);
890
        gen_op_iwmmxt_set_mup();
891
        gen_op_iwmmxt_set_cup();
892
        break;
893
    case 0x810: case 0xa10:                                /* WMADD */
894
        wrd = (insn >> 12) & 0xf;
895
        rd0 = (insn >> 0) & 0xf;
896
        rd1 = (insn >> 16) & 0xf;
897
        gen_op_iwmmxt_movq_M0_wRn(rd0);
898
        if (insn & (1 << 21))
899
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
900
        else
901
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
902
        gen_op_iwmmxt_movq_wRn_M0(wrd);
903
        gen_op_iwmmxt_set_mup();
904
        break;
905
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:        /* WUNPCKIL */
906
        wrd = (insn >> 12) & 0xf;
907
        rd0 = (insn >> 16) & 0xf;
908
        rd1 = (insn >> 0) & 0xf;
909
        gen_op_iwmmxt_movq_M0_wRn(rd0);
910
        switch ((insn >> 22) & 3) {
911
        case 0:
912
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
913
            break;
914
        case 1:
915
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
916
            break;
917
        case 2:
918
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
919
            break;
920
        case 3:
921
            return 1;
922
        }
923
        gen_op_iwmmxt_movq_wRn_M0(wrd);
924
        gen_op_iwmmxt_set_mup();
925
        gen_op_iwmmxt_set_cup();
926
        break;
927
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:        /* WUNPCKIH */
928
        wrd = (insn >> 12) & 0xf;
929
        rd0 = (insn >> 16) & 0xf;
930
        rd1 = (insn >> 0) & 0xf;
931
        gen_op_iwmmxt_movq_M0_wRn(rd0);
932
        switch ((insn >> 22) & 3) {
933
        case 0:
934
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
935
            break;
936
        case 1:
937
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
938
            break;
939
        case 2:
940
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
941
            break;
942
        case 3:
943
            return 1;
944
        }
945
        gen_op_iwmmxt_movq_wRn_M0(wrd);
946
        gen_op_iwmmxt_set_mup();
947
        gen_op_iwmmxt_set_cup();
948
        break;
949
    case 0x012: case 0x112: case 0x412: case 0x512:        /* WSAD */
950
        wrd = (insn >> 12) & 0xf;
951
        rd0 = (insn >> 16) & 0xf;
952
        rd1 = (insn >> 0) & 0xf;
953
        gen_op_iwmmxt_movq_M0_wRn(rd0);
954
        if (insn & (1 << 22))
955
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
956
        else
957
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
958
        if (!(insn & (1 << 20)))
959
            gen_op_iwmmxt_addl_M0_wRn(wrd);
960
        gen_op_iwmmxt_movq_wRn_M0(wrd);
961
        gen_op_iwmmxt_set_mup();
962
        break;
963
    case 0x010: case 0x110: case 0x210: case 0x310:        /* WMUL */
964
        wrd = (insn >> 12) & 0xf;
965
        rd0 = (insn >> 16) & 0xf;
966
        rd1 = (insn >> 0) & 0xf;
967
        gen_op_iwmmxt_movq_M0_wRn(rd0);
968
        if (insn & (1 << 21))
969
            gen_op_iwmmxt_mulsw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
970
        else
971
            gen_op_iwmmxt_muluw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
972
        gen_op_iwmmxt_movq_wRn_M0(wrd);
973
        gen_op_iwmmxt_set_mup();
974
        break;
975
    case 0x410: case 0x510: case 0x610: case 0x710:        /* WMAC */
976
        wrd = (insn >> 12) & 0xf;
977
        rd0 = (insn >> 16) & 0xf;
978
        rd1 = (insn >> 0) & 0xf;
979
        gen_op_iwmmxt_movq_M0_wRn(rd0);
980
        if (insn & (1 << 21))
981
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
982
        else
983
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
984
        if (!(insn & (1 << 20))) {
985
            if (insn & (1 << 21))
986
                gen_op_iwmmxt_addsq_M0_wRn(wrd);
987
            else
988
                gen_op_iwmmxt_adduq_M0_wRn(wrd);
989
        }
990
        gen_op_iwmmxt_movq_wRn_M0(wrd);
991
        gen_op_iwmmxt_set_mup();
992
        break;
993
    case 0x006: case 0x406: case 0x806: case 0xc06:        /* WCMPEQ */
994
        wrd = (insn >> 12) & 0xf;
995
        rd0 = (insn >> 16) & 0xf;
996
        rd1 = (insn >> 0) & 0xf;
997
        gen_op_iwmmxt_movq_M0_wRn(rd0);
998
        switch ((insn >> 22) & 3) {
999
        case 0:
1000
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1001
            break;
1002
        case 1:
1003
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1004
            break;
1005
        case 2:
1006
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1007
            break;
1008
        case 3:
1009
            return 1;
1010
        }
1011
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1012
        gen_op_iwmmxt_set_mup();
1013
        gen_op_iwmmxt_set_cup();
1014
        break;
1015
    case 0x800: case 0x900: case 0xc00: case 0xd00:        /* WAVG2 */
1016
        wrd = (insn >> 12) & 0xf;
1017
        rd0 = (insn >> 16) & 0xf;
1018
        rd1 = (insn >> 0) & 0xf;
1019
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1020
        if (insn & (1 << 22))
1021
            gen_op_iwmmxt_avgw_M0_wRn(rd1, (insn >> 20) & 1);
1022
        else
1023
            gen_op_iwmmxt_avgb_M0_wRn(rd1, (insn >> 20) & 1);
1024
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1025
        gen_op_iwmmxt_set_mup();
1026
        gen_op_iwmmxt_set_cup();
1027
        break;
1028
    case 0x802: case 0x902: case 0xa02: case 0xb02:        /* WALIGNR */
1029
        wrd = (insn >> 12) & 0xf;
1030
        rd0 = (insn >> 16) & 0xf;
1031
        rd1 = (insn >> 0) & 0xf;
1032
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1033
        gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1034
        gen_op_movl_T1_im(7);
1035
        gen_op_andl_T0_T1();
1036
        gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1037
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1038
        gen_op_iwmmxt_set_mup();
1039
        break;
1040
    case 0x601: case 0x605: case 0x609: case 0x60d:        /* TINSR */
1041
        rd = (insn >> 12) & 0xf;
1042
        wrd = (insn >> 16) & 0xf;
1043
        gen_movl_T0_reg(s, rd);
1044
        gen_op_iwmmxt_movq_M0_wRn(wrd);
1045
        switch ((insn >> 6) & 3) {
1046
        case 0:
1047
            gen_op_movl_T1_im(0xff);
1048
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1049
            break;
1050
        case 1:
1051
            gen_op_movl_T1_im(0xffff);
1052
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1053
            break;
1054
        case 2:
1055
            gen_op_movl_T1_im(0xffffffff);
1056
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1057
            break;
1058
        case 3:
1059
            return 1;
1060
        }
1061
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1062
        gen_op_iwmmxt_set_mup();
1063
        break;
1064
    case 0x107: case 0x507: case 0x907: case 0xd07:        /* TEXTRM */
1065
        rd = (insn >> 12) & 0xf;
1066
        wrd = (insn >> 16) & 0xf;
1067
        if (rd == 15)
1068
            return 1;
1069
        gen_op_iwmmxt_movq_M0_wRn(wrd);
1070
        switch ((insn >> 22) & 3) {
1071
        case 0:
1072
            if (insn & 8)
1073
                gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1074
            else {
1075
                gen_op_movl_T1_im(0xff);
1076
                gen_op_iwmmxt_extru_T0_M0_T1((insn & 7) << 3);
1077
            }
1078
            break;
1079
        case 1:
1080
            if (insn & 8)
1081
                gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1082
            else {
1083
                gen_op_movl_T1_im(0xffff);
1084
                gen_op_iwmmxt_extru_T0_M0_T1((insn & 3) << 4);
1085
            }
1086
            break;
1087
        case 2:
1088
            gen_op_movl_T1_im(0xffffffff);
1089
            gen_op_iwmmxt_extru_T0_M0_T1((insn & 1) << 5);
1090
            break;
1091
        case 3:
1092
            return 1;
1093
        }
1094
        gen_movl_reg_T0(s, rd);
1095
        break;
1096
    case 0x117: case 0x517: case 0x917: case 0xd17:        /* TEXTRC */
1097
        if ((insn & 0x000ff008) != 0x0003f000)
1098
            return 1;
1099
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1100
        switch ((insn >> 22) & 3) {
1101
        case 0:
1102
            gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1103
            break;
1104
        case 1:
1105
            gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1106
            break;
1107
        case 2:
1108
            gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1109
            break;
1110
        case 3:
1111
            return 1;
1112
        }
1113
        gen_op_shll_T1_im(28);
1114
        gen_op_movl_T0_T1();
1115
        gen_op_movl_cpsr_T0(0xf0000000);
1116
        break;
1117
    case 0x401: case 0x405: case 0x409: case 0x40d:        /* TBCST */
1118
        rd = (insn >> 12) & 0xf;
1119
        wrd = (insn >> 16) & 0xf;
1120
        gen_movl_T0_reg(s, rd);
1121
        switch ((insn >> 6) & 3) {
1122
        case 0:
1123
            gen_op_iwmmxt_bcstb_M0_T0();
1124
            break;
1125
        case 1:
1126
            gen_op_iwmmxt_bcstw_M0_T0();
1127
            break;
1128
        case 2:
1129
            gen_op_iwmmxt_bcstl_M0_T0();
1130
            break;
1131
        case 3:
1132
            return 1;
1133
        }
1134
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1135
        gen_op_iwmmxt_set_mup();
1136
        break;
1137
    case 0x113: case 0x513: case 0x913: case 0xd13:        /* TANDC */
1138
        if ((insn & 0x000ff00f) != 0x0003f000)
1139
            return 1;
1140
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1141
        switch ((insn >> 22) & 3) {
1142
        case 0:
1143
            for (i = 0; i < 7; i ++) {
1144
                gen_op_shll_T1_im(4);
1145
                gen_op_andl_T0_T1();
1146
            }
1147
            break;
1148
        case 1:
1149
            for (i = 0; i < 3; i ++) {
1150
                gen_op_shll_T1_im(8);
1151
                gen_op_andl_T0_T1();
1152
            }
1153
            break;
1154
        case 2:
1155
            gen_op_shll_T1_im(16);
1156
            gen_op_andl_T0_T1();
1157
            break;
1158
        case 3:
1159
            return 1;
1160
        }
1161
        gen_op_movl_cpsr_T0(0xf0000000);
1162
        break;
1163
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:        /* WACC */
1164
        wrd = (insn >> 12) & 0xf;
1165
        rd0 = (insn >> 16) & 0xf;
1166
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1167
        switch ((insn >> 22) & 3) {
1168
        case 0:
1169
            gen_op_iwmmxt_addcb_M0();
1170
            break;
1171
        case 1:
1172
            gen_op_iwmmxt_addcw_M0();
1173
            break;
1174
        case 2:
1175
            gen_op_iwmmxt_addcl_M0();
1176
            break;
1177
        case 3:
1178
            return 1;
1179
        }
1180
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1181
        gen_op_iwmmxt_set_mup();
1182
        break;
1183
    case 0x115: case 0x515: case 0x915: case 0xd15:        /* TORC */
1184
        if ((insn & 0x000ff00f) != 0x0003f000)
1185
            return 1;
1186
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1187
        switch ((insn >> 22) & 3) {
1188
        case 0:
1189
            for (i = 0; i < 7; i ++) {
1190
                gen_op_shll_T1_im(4);
1191
                gen_op_orl_T0_T1();
1192
            }
1193
            break;
1194
        case 1:
1195
            for (i = 0; i < 3; i ++) {
1196
                gen_op_shll_T1_im(8);
1197
                gen_op_orl_T0_T1();
1198
            }
1199
            break;
1200
        case 2:
1201
            gen_op_shll_T1_im(16);
1202
            gen_op_orl_T0_T1();
1203
            break;
1204
        case 3:
1205
            return 1;
1206
        }
1207
        gen_op_movl_T1_im(0xf0000000);
1208
        gen_op_andl_T0_T1();
1209
        gen_op_movl_cpsr_T0(0xf0000000);
1210
        break;
1211
    case 0x103: case 0x503: case 0x903: case 0xd03:        /* TMOVMSK */
1212
        rd = (insn >> 12) & 0xf;
1213
        rd0 = (insn >> 16) & 0xf;
1214
        if ((insn & 0xf) != 0)
1215
            return 1;
1216
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1217
        switch ((insn >> 22) & 3) {
1218
        case 0:
1219
            gen_op_iwmmxt_msbb_T0_M0();
1220
            break;
1221
        case 1:
1222
            gen_op_iwmmxt_msbw_T0_M0();
1223
            break;
1224
        case 2:
1225
            gen_op_iwmmxt_msbl_T0_M0();
1226
            break;
1227
        case 3:
1228
            return 1;
1229
        }
1230
        gen_movl_reg_T0(s, rd);
1231
        break;
1232
    case 0x106: case 0x306: case 0x506: case 0x706:        /* WCMPGT */
1233
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
1234
        wrd = (insn >> 12) & 0xf;
1235
        rd0 = (insn >> 16) & 0xf;
1236
        rd1 = (insn >> 0) & 0xf;
1237
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1238
        switch ((insn >> 22) & 3) {
1239
        case 0:
1240
            if (insn & (1 << 21))
1241
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1242
            else
1243
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1244
            break;
1245
        case 1:
1246
            if (insn & (1 << 21))
1247
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1248
            else
1249
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1250
            break;
1251
        case 2:
1252
            if (insn & (1 << 21))
1253
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1254
            else
1255
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1256
            break;
1257
        case 3:
1258
            return 1;
1259
        }
1260
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1261
        gen_op_iwmmxt_set_mup();
1262
        gen_op_iwmmxt_set_cup();
1263
        break;
1264
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:        /* WUNPCKEL */
1265
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1266
        wrd = (insn >> 12) & 0xf;
1267
        rd0 = (insn >> 16) & 0xf;
1268
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1269
        switch ((insn >> 22) & 3) {
1270
        case 0:
1271
            if (insn & (1 << 21))
1272
                gen_op_iwmmxt_unpacklsb_M0();
1273
            else
1274
                gen_op_iwmmxt_unpacklub_M0();
1275
            break;
1276
        case 1:
1277
            if (insn & (1 << 21))
1278
                gen_op_iwmmxt_unpacklsw_M0();
1279
            else
1280
                gen_op_iwmmxt_unpackluw_M0();
1281
            break;
1282
        case 2:
1283
            if (insn & (1 << 21))
1284
                gen_op_iwmmxt_unpacklsl_M0();
1285
            else
1286
                gen_op_iwmmxt_unpacklul_M0();
1287
            break;
1288
        case 3:
1289
            return 1;
1290
        }
1291
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1292
        gen_op_iwmmxt_set_mup();
1293
        gen_op_iwmmxt_set_cup();
1294
        break;
1295
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:        /* WUNPCKEH */
1296
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1297
        wrd = (insn >> 12) & 0xf;
1298
        rd0 = (insn >> 16) & 0xf;
1299
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1300
        switch ((insn >> 22) & 3) {
1301
        case 0:
1302
            if (insn & (1 << 21))
1303
                gen_op_iwmmxt_unpackhsb_M0();
1304
            else
1305
                gen_op_iwmmxt_unpackhub_M0();
1306
            break;
1307
        case 1:
1308
            if (insn & (1 << 21))
1309
                gen_op_iwmmxt_unpackhsw_M0();
1310
            else
1311
                gen_op_iwmmxt_unpackhuw_M0();
1312
            break;
1313
        case 2:
1314
            if (insn & (1 << 21))
1315
                gen_op_iwmmxt_unpackhsl_M0();
1316
            else
1317
                gen_op_iwmmxt_unpackhul_M0();
1318
            break;
1319
        case 3:
1320
            return 1;
1321
        }
1322
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1323
        gen_op_iwmmxt_set_mup();
1324
        gen_op_iwmmxt_set_cup();
1325
        break;
1326
    case 0x204: case 0x604: case 0xa04: case 0xe04:        /* WSRL */
1327
    case 0x214: case 0x614: case 0xa14: case 0xe14:
1328
        wrd = (insn >> 12) & 0xf;
1329
        rd0 = (insn >> 16) & 0xf;
1330
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1331
        if (gen_iwmmxt_shift(insn, 0xff))
1332
            return 1;
1333
        switch ((insn >> 22) & 3) {
1334
        case 0:
1335
            return 1;
1336
        case 1:
1337
            gen_op_iwmmxt_srlw_M0_T0();
1338
            break;
1339
        case 2:
1340
            gen_op_iwmmxt_srll_M0_T0();
1341
            break;
1342
        case 3:
1343
            gen_op_iwmmxt_srlq_M0_T0();
1344
            break;
1345
        }
1346
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1347
        gen_op_iwmmxt_set_mup();
1348
        gen_op_iwmmxt_set_cup();
1349
        break;
1350
    case 0x004: case 0x404: case 0x804: case 0xc04:        /* WSRA */
1351
    case 0x014: case 0x414: case 0x814: case 0xc14:
1352
        wrd = (insn >> 12) & 0xf;
1353
        rd0 = (insn >> 16) & 0xf;
1354
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1355
        if (gen_iwmmxt_shift(insn, 0xff))
1356
            return 1;
1357
        switch ((insn >> 22) & 3) {
1358
        case 0:
1359
            return 1;
1360
        case 1:
1361
            gen_op_iwmmxt_sraw_M0_T0();
1362
            break;
1363
        case 2:
1364
            gen_op_iwmmxt_sral_M0_T0();
1365
            break;
1366
        case 3:
1367
            gen_op_iwmmxt_sraq_M0_T0();
1368
            break;
1369
        }
1370
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1371
        gen_op_iwmmxt_set_mup();
1372
        gen_op_iwmmxt_set_cup();
1373
        break;
1374
    case 0x104: case 0x504: case 0x904: case 0xd04:        /* WSLL */
1375
    case 0x114: case 0x514: case 0x914: case 0xd14:
1376
        wrd = (insn >> 12) & 0xf;
1377
        rd0 = (insn >> 16) & 0xf;
1378
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1379
        if (gen_iwmmxt_shift(insn, 0xff))
1380
            return 1;
1381
        switch ((insn >> 22) & 3) {
1382
        case 0:
1383
            return 1;
1384
        case 1:
1385
            gen_op_iwmmxt_sllw_M0_T0();
1386
            break;
1387
        case 2:
1388
            gen_op_iwmmxt_slll_M0_T0();
1389
            break;
1390
        case 3:
1391
            gen_op_iwmmxt_sllq_M0_T0();
1392
            break;
1393
        }
1394
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1395
        gen_op_iwmmxt_set_mup();
1396
        gen_op_iwmmxt_set_cup();
1397
        break;
1398
    case 0x304: case 0x704: case 0xb04: case 0xf04:        /* WROR */
1399
    case 0x314: case 0x714: case 0xb14: case 0xf14:
1400
        wrd = (insn >> 12) & 0xf;
1401
        rd0 = (insn >> 16) & 0xf;
1402
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1403
        switch ((insn >> 22) & 3) {
1404
        case 0:
1405
            return 1;
1406
        case 1:
1407
            if (gen_iwmmxt_shift(insn, 0xf))
1408
                return 1;
1409
            gen_op_iwmmxt_rorw_M0_T0();
1410
            break;
1411
        case 2:
1412
            if (gen_iwmmxt_shift(insn, 0x1f))
1413
                return 1;
1414
            gen_op_iwmmxt_rorl_M0_T0();
1415
            break;
1416
        case 3:
1417
            if (gen_iwmmxt_shift(insn, 0x3f))
1418
                return 1;
1419
            gen_op_iwmmxt_rorq_M0_T0();
1420
            break;
1421
        }
1422
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1423
        gen_op_iwmmxt_set_mup();
1424
        gen_op_iwmmxt_set_cup();
1425
        break;
1426
    case 0x116: case 0x316: case 0x516: case 0x716:        /* WMIN */
1427
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
1428
        wrd = (insn >> 12) & 0xf;
1429
        rd0 = (insn >> 16) & 0xf;
1430
        rd1 = (insn >> 0) & 0xf;
1431
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1432
        switch ((insn >> 22) & 3) {
1433
        case 0:
1434
            if (insn & (1 << 21))
1435
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
1436
            else
1437
                gen_op_iwmmxt_minub_M0_wRn(rd1);
1438
            break;
1439
        case 1:
1440
            if (insn & (1 << 21))
1441
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
1442
            else
1443
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
1444
            break;
1445
        case 2:
1446
            if (insn & (1 << 21))
1447
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
1448
            else
1449
                gen_op_iwmmxt_minul_M0_wRn(rd1);
1450
            break;
1451
        case 3:
1452
            return 1;
1453
        }
1454
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1455
        gen_op_iwmmxt_set_mup();
1456
        break;
1457
    case 0x016: case 0x216: case 0x416: case 0x616:        /* WMAX */
1458
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
1459
        wrd = (insn >> 12) & 0xf;
1460
        rd0 = (insn >> 16) & 0xf;
1461
        rd1 = (insn >> 0) & 0xf;
1462
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1463
        switch ((insn >> 22) & 3) {
1464
        case 0:
1465
            if (insn & (1 << 21))
1466
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
1467
            else
1468
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
1469
            break;
1470
        case 1:
1471
            if (insn & (1 << 21))
1472
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
1473
            else
1474
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
1475
            break;
1476
        case 2:
1477
            if (insn & (1 << 21))
1478
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
1479
            else
1480
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
1481
            break;
1482
        case 3:
1483
            return 1;
1484
        }
1485
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1486
        gen_op_iwmmxt_set_mup();
1487
        break;
1488
    case 0x002: case 0x102: case 0x202: case 0x302:        /* WALIGNI */
1489
    case 0x402: case 0x502: case 0x602: case 0x702:
1490
        wrd = (insn >> 12) & 0xf;
1491
        rd0 = (insn >> 16) & 0xf;
1492
        rd1 = (insn >> 0) & 0xf;
1493
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1494
        gen_op_movl_T0_im((insn >> 20) & 3);
1495
        gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1496
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1497
        gen_op_iwmmxt_set_mup();
1498
        break;
1499
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:        /* WSUB */
1500
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
1501
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
1502
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
1503
        wrd = (insn >> 12) & 0xf;
1504
        rd0 = (insn >> 16) & 0xf;
1505
        rd1 = (insn >> 0) & 0xf;
1506
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1507
        switch ((insn >> 20) & 0xf) {
1508
        case 0x0:
1509
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
1510
            break;
1511
        case 0x1:
1512
            gen_op_iwmmxt_subub_M0_wRn(rd1);
1513
            break;
1514
        case 0x3:
1515
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
1516
            break;
1517
        case 0x4:
1518
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
1519
            break;
1520
        case 0x5:
1521
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
1522
            break;
1523
        case 0x7:
1524
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
1525
            break;
1526
        case 0x8:
1527
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
1528
            break;
1529
        case 0x9:
1530
            gen_op_iwmmxt_subul_M0_wRn(rd1);
1531
            break;
1532
        case 0xb:
1533
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
1534
            break;
1535
        default:
1536
            return 1;
1537
        }
1538
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1539
        gen_op_iwmmxt_set_mup();
1540
        gen_op_iwmmxt_set_cup();
1541
        break;
1542
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:        /* WSHUFH */
1543
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
1544
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
1545
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
1546
        wrd = (insn >> 12) & 0xf;
1547
        rd0 = (insn >> 16) & 0xf;
1548
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1549
        gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
1550
        gen_op_iwmmxt_shufh_M0_T0();
1551
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1552
        gen_op_iwmmxt_set_mup();
1553
        gen_op_iwmmxt_set_cup();
1554
        break;
1555
    case 0x018: case 0x118: case 0x218: case 0x318:        /* WADD */
1556
    case 0x418: case 0x518: case 0x618: case 0x718:
1557
    case 0x818: case 0x918: case 0xa18: case 0xb18:
1558
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
1559
        wrd = (insn >> 12) & 0xf;
1560
        rd0 = (insn >> 16) & 0xf;
1561
        rd1 = (insn >> 0) & 0xf;
1562
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1563
        switch ((insn >> 20) & 0xf) {
1564
        case 0x0:
1565
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
1566
            break;
1567
        case 0x1:
1568
            gen_op_iwmmxt_addub_M0_wRn(rd1);
1569
            break;
1570
        case 0x3:
1571
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
1572
            break;
1573
        case 0x4:
1574
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
1575
            break;
1576
        case 0x5:
1577
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
1578
            break;
1579
        case 0x7:
1580
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
1581
            break;
1582
        case 0x8:
1583
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
1584
            break;
1585
        case 0x9:
1586
            gen_op_iwmmxt_addul_M0_wRn(rd1);
1587
            break;
1588
        case 0xb:
1589
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
1590
            break;
1591
        default:
1592
            return 1;
1593
        }
1594
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1595
        gen_op_iwmmxt_set_mup();
1596
        gen_op_iwmmxt_set_cup();
1597
        break;
1598
    case 0x008: case 0x108: case 0x208: case 0x308:        /* WPACK */
1599
    case 0x408: case 0x508: case 0x608: case 0x708:
1600
    case 0x808: case 0x908: case 0xa08: case 0xb08:
1601
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
1602
        wrd = (insn >> 12) & 0xf;
1603
        rd0 = (insn >> 16) & 0xf;
1604
        rd1 = (insn >> 0) & 0xf;
1605
        gen_op_iwmmxt_movq_M0_wRn(rd0);
1606
        if (!(insn & (1 << 20)))
1607
            return 1;
1608
        switch ((insn >> 22) & 3) {
1609
        case 0:
1610
            return 1;
1611
        case 1:
1612
            if (insn & (1 << 21))
1613
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
1614
            else
1615
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
1616
            break;
1617
        case 2:
1618
            if (insn & (1 << 21))
1619
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
1620
            else
1621
                gen_op_iwmmxt_packul_M0_wRn(rd1);
1622
            break;
1623
        case 3:
1624
            if (insn & (1 << 21))
1625
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
1626
            else
1627
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
1628
            break;
1629
        }
1630
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1631
        gen_op_iwmmxt_set_mup();
1632
        gen_op_iwmmxt_set_cup();
1633
        break;
1634
    case 0x201: case 0x203: case 0x205: case 0x207:
1635
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
1636
    case 0x211: case 0x213: case 0x215: case 0x217:
1637
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
1638
        wrd = (insn >> 5) & 0xf;
1639
        rd0 = (insn >> 12) & 0xf;
1640
        rd1 = (insn >> 0) & 0xf;
1641
        if (rd0 == 0xf || rd1 == 0xf)
1642
            return 1;
1643
        gen_op_iwmmxt_movq_M0_wRn(wrd);
1644
        switch ((insn >> 16) & 0xf) {
1645
        case 0x0:                                        /* TMIA */
1646
            gen_movl_T0_reg(s, rd0);
1647
            gen_movl_T1_reg(s, rd1);
1648
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
1649
            break;
1650
        case 0x8:                                        /* TMIAPH */
1651
            gen_movl_T0_reg(s, rd0);
1652
            gen_movl_T1_reg(s, rd1);
1653
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
1654
            break;
1655
        case 0xc: case 0xd: case 0xe: case 0xf:                /* TMIAxy */
1656
            gen_movl_T1_reg(s, rd0);
1657
            if (insn & (1 << 16))
1658
                gen_op_shrl_T1_im(16);
1659
            gen_op_movl_T0_T1();
1660
            gen_movl_T1_reg(s, rd1);
1661
            if (insn & (1 << 17))
1662
                gen_op_shrl_T1_im(16);
1663
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
1664
            break;
1665
        default:
1666
            return 1;
1667
        }
1668
        gen_op_iwmmxt_movq_wRn_M0(wrd);
1669
        gen_op_iwmmxt_set_mup();
1670
        break;
1671
    default:
1672
        return 1;
1673
    }
1674

    
1675
    return 0;
1676
}
1677

    
1678
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
1679
   (i.e. an undefined instruction).  */
1680
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
1681
{
1682
    int acc, rd0, rd1, rdhi, rdlo;
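    /* The XScale accumulator acc0 is kept in the iWMMXt register file, so
       the MIA/MAR/MRA forms below reuse the iWMMXt multiply-accumulate and
       move helpers.  */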
1683

    
1684
    if ((insn & 0x0ff00f10) == 0x0e200010) {
1685
        /* Multiply with Internal Accumulate Format */
1686
        rd0 = (insn >> 12) & 0xf;
1687
        rd1 = insn & 0xf;
1688
        acc = (insn >> 5) & 7;
1689

    
1690
        if (acc != 0)
1691
            return 1;
1692

    
1693
        switch ((insn >> 16) & 0xf) {
1694
        case 0x0:                                        /* MIA */
1695
            gen_movl_T0_reg(s, rd0);
1696
            gen_movl_T1_reg(s, rd1);
1697
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
1698
            break;
1699
        case 0x8:                                        /* MIAPH */
1700
            gen_movl_T0_reg(s, rd0);
1701
            gen_movl_T1_reg(s, rd1);
1702
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
1703
            break;
1704
        case 0xc:                                        /* MIABB */
1705
        case 0xd:                                        /* MIABT */
1706
        case 0xe:                                        /* MIATB */
1707
        case 0xf:                                        /* MIATT */
1708
            gen_movl_T1_reg(s, rd0);
1709
            if (insn & (1 << 16))
1710
                gen_op_shrl_T1_im(16);
1711
            gen_op_movl_T0_T1();
1712
            gen_movl_T1_reg(s, rd1);
1713
            if (insn & (1 << 17))
1714
                gen_op_shrl_T1_im(16);
1715
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
1716
            break;
1717
        default:
1718
            return 1;
1719
        }
1720

    
1721
        gen_op_iwmmxt_movq_wRn_M0(acc);
1722
        return 0;
1723
    }
1724

    
1725
    if ((insn & 0x0fe00ff8) == 0x0c400000) {
1726
        /* Internal Accumulator Access Format */
1727
        rdhi = (insn >> 16) & 0xf;
1728
        rdlo = (insn >> 12) & 0xf;
1729
        acc = insn & 7;
1730

    
1731
        if (acc != 0)
1732
            return 1;
1733

    
1734
        if (insn & ARM_CP_RW_BIT) {                        /* MRA */
1735
            gen_op_iwmmxt_movl_T0_T1_wRn(acc);
1736
            gen_movl_reg_T0(s, rdlo);
1737
            gen_op_movl_T0_im((1 << (40 - 32)) - 1);
1738
            gen_op_andl_T0_T1();
1739
            gen_movl_reg_T0(s, rdhi);
1740
        } else {                                        /* MAR */
1741
            gen_movl_T0_reg(s, rdlo);
1742
            gen_movl_T1_reg(s, rdhi);
1743
            gen_op_iwmmxt_movl_wRn_T0_T1(acc);
1744
        }
1745
        return 0;
1746
    }
1747

    
1748
    return 1;
1749
}
1750

    
1751
/* Disassemble system coprocessor instruction.  Return nonzero if
1752
   the instruction is not defined.  */
1753
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
1754
{
1755
    uint32_t rd = (insn >> 12) & 0xf;
1756
    uint32_t cp = (insn >> 8) & 0xf;
1757
    if (IS_USER(s)) {
1758
        return 1;
1759
    }
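    /* The PC is synchronised before each access so that a cp_read/cp_write
       hook that raises an exception sees an up-to-date PC.  */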
1760

    
1761
    if (insn & ARM_CP_RW_BIT) {
1762
        if (!env->cp[cp].cp_read)
1763
            return 1;
1764
        gen_op_movl_T0_im((uint32_t) s->pc);
1765
        gen_set_pc_T0();
1766
        gen_op_movl_T0_cp(insn);
1767
        gen_movl_reg_T0(s, rd);
1768
    } else {
1769
        if (!env->cp[cp].cp_write)
1770
            return 1;
1771
        gen_op_movl_T0_im((uint32_t) s->pc);
1772
        gen_set_pc_T0();
1773
        gen_movl_T0_reg(s, rd);
1774
        gen_op_movl_cp_T0(insn);
1775
    }
1776
    return 0;
1777
}
1778

    
1779
static int cp15_user_ok(uint32_t insn)
1780
{
1781
    int cpn = (insn >> 16) & 0xf;
1782
    int cpm = insn & 0xf;
1783
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
1784

    
1785
    if (cpn == 13 && cpm == 0) {
1786
        /* TLS register.  */
1787
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
1788
            return 1;
1789
    }
1790
    if (cpn == 7) {
1791
        /* ISB, DSB, DMB.  */
1792
        if ((cpm == 5 && op == 4)
1793
                || (cpm == 10 && (op == 4 || op == 5)))
1794
            return 1;
1795
    }
1796
    return 0;
1797
}
1798

    
1799
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
1800
   the instruction is not defined.  */
1801
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
1802
{
1803
    uint32_t rd;
1804

    
1805
    /* M profile cores use memory mapped registers instead of cp15.  */
1806
    if (arm_feature(env, ARM_FEATURE_M))
1807
        return 1;
1808

    
1809
    if ((insn & (1 << 25)) == 0) {
1810
        if (insn & (1 << 20)) {
1811
            /* mrrc */
1812
            return 1;
1813
        }
1814
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
1815
        return 0;
1816
    }
1817
    if ((insn & (1 << 4)) == 0) {
1818
        /* cdp */
1819
        return 1;
1820
    }
1821
    if (IS_USER(s) && !cp15_user_ok(insn)) {
1822
        return 1;
1823
    }
1824
    if ((insn & 0x0fff0fff) == 0x0e070f90
1825
        || (insn & 0x0fff0fff) == 0x0e070f58) {
1826
        /* Wait for interrupt.  */
1827
        gen_op_movl_T0_im((long)s->pc);
1828
        gen_set_pc_T0();
1829
        s->is_jmp = DISAS_WFI;
1830
        return 0;
1831
    }
1832
    rd = (insn >> 12) & 0xf;
1833
    if (insn & ARM_CP_RW_BIT) {
1834
        gen_op_movl_T0_cp15(insn);
1835
        /* If the destination register is r15 then the condition codes are set.  */
1836
        if (rd != 15)
1837
            gen_movl_reg_T0(s, rd);
1838
    } else {
1839
        gen_movl_T0_reg(s, rd);
1840
        gen_op_movl_cp15_T0(insn);
1841
        /* Normally we would always end the TB here, but Linux
1842
         * arch/arm/mach-pxa/sleep.S expects two instructions following
1843
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
1844
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
1845
                (insn & 0x0fff0fff) != 0x0e010f10)
1846
            gen_lookup_tb(s);
1847
    }
1848
    return 0;
1849
}
1850

    
1851
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
1852
#define VFP_SREG(insn, bigbit, smallbit) \
1853
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
1854
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
1855
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
1856
        reg = (((insn) >> (bigbit)) & 0x0f) \
1857
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
1858
    } else { \
1859
        if (insn & (1 << (smallbit))) \
1860
            return 1; \
1861
        reg = ((insn) >> (bigbit)) & 0x0f; \
1862
    }} while (0)
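/* Single-precision register numbers are five bits: a four-bit field in the
   instruction plus a separate low bit.  Double-precision numbers have only
   the four-bit field before VFP3; VFP3 uses the extra bit as bit 4 of the
   register number (D16-D31).  */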
1863

    
1864
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
1865
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
1866
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
1867
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
1868
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
1869
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
1870

    
1871
static inline int
1872
vfp_enabled(CPUState * env)
1873
{
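    /* FPEXC.EN (bit 30) gates all VFP and Neon data-processing and
       load/store instructions; with it clear only a few FMXR/FMRX
       system-register accesses are allowed (see disas_vfp_insn).  */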
1874
    return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
1875
}
1876

    
1877
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
1878
   (i.e. an undefined instruction).  */
1879
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
1880
{
1881
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
1882
    int dp, veclen;
1883

    
1884
    if (!arm_feature(env, ARM_FEATURE_VFP))
1885
        return 1;
1886

    
1887
    if (!vfp_enabled(env)) {
1888
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
1889
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
1890
            return 1;
1891
        rn = (insn >> 16) & 0xf;
1892
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
1893
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
1894
            return 1;
1895
    }
1896
    dp = ((insn & 0xf00) == 0xb00);
1897
    switch ((insn >> 24) & 0xf) {
1898
    case 0xe:
1899
        if (insn & (1 << 4)) {
1900
            /* single register transfer */
1901
            rd = (insn >> 12) & 0xf;
1902
            if (dp) {
1903
                int size;
1904
                int pass;
1905

    
1906
                VFP_DREG_N(rn, insn);
1907
                if (insn & 0xf)
1908
                    return 1;
1909
                if (insn & 0x00c00060
1910
                    && !arm_feature(env, ARM_FEATURE_NEON))
1911
                    return 1;
1912

    
1913
                pass = (insn >> 21) & 1;
1914
                if (insn & (1 << 22)) {
1915
                    size = 0;
1916
                    offset = ((insn >> 5) & 3) * 8;
1917
                } else if (insn & (1 << 5)) {
1918
                    size = 1;
1919
                    offset = (insn & (1 << 6)) ? 16 : 0;
1920
                } else {
1921
                    size = 2;
1922
                    offset = 0;
1923
                }
1924
                if (insn & ARM_CP_RW_BIT) {
1925
                    /* vfp->arm */
1926
                    switch (size) {
1927
                    case 0:
1928
                        NEON_GET_REG(T1, rn, pass);
1929
                        if (offset)
1930
                            gen_op_shrl_T1_im(offset);
1931
                        if (insn & (1 << 23))
1932
                            gen_uxtb(cpu_T[1]);
1933
                        else
1934
                            gen_sxtb(cpu_T[1]);
1935
                        break;
1936
                    case 1:
1937
                        NEON_GET_REG(T1, rn, pass);
1938
                        if (insn & (1 << 23)) {
1939
                            if (offset) {
1940
                                gen_op_shrl_T1_im(16);
1941
                            } else {
1942
                                gen_uxth(cpu_T[1]);
1943
                            }
1944
                        } else {
1945
                            if (offset) {
1946
                                gen_op_sarl_T1_im(16);
1947
                            } else {
1948
                                gen_sxth(cpu_T[1]);
1949
                            }
1950
                        }
1951
                        break;
1952
                    case 2:
1953
                        NEON_GET_REG(T1, rn, pass);
1954
                        break;
1955
                    }
1956
                    gen_movl_reg_T1(s, rd);
1957
                } else {
1958
                    /* arm->vfp */
1959
                    gen_movl_T0_reg(s, rd);
1960
                    if (insn & (1 << 23)) {
1961
                        /* VDUP */
1962
                        if (size == 0) {
1963
                            gen_op_neon_dup_u8(0);
1964
                        } else if (size == 1) {
1965
                            gen_op_neon_dup_low16();
1966
                        }
1967
                        NEON_SET_REG(T0, rn, 0);
1968
                        NEON_SET_REG(T0, rn, 1);
1969
                    } else {
1970
                        /* VMOV */
1971
                        switch (size) {
1972
                        case 0:
1973
                            NEON_GET_REG(T2, rn, pass);
1974
                            gen_op_movl_T1_im(0xff);
1975
                            gen_op_andl_T0_T1();
1976
                            gen_op_neon_insert_elt(offset, ~(0xff << offset));
1977
                            NEON_SET_REG(T2, rn, pass);
1978
                            break;
1979
                        case 1:
1980
                            NEON_GET_REG(T2, rn, pass);
1981
                            gen_op_movl_T1_im(0xffff);
1982
                            gen_op_andl_T0_T1();
1983
                            bank_mask = offset ? 0xffff : 0xffff0000;
1984
                            gen_op_neon_insert_elt(offset, bank_mask);
1985
                            NEON_SET_REG(T2, rn, pass);
1986
                            break;
1987
                        case 2:
1988
                            NEON_SET_REG(T0, rn, pass);
1989
                            break;
1990
                        }
1991
                    }
1992
                }
1993
            } else { /* !dp */
1994
                if ((insn & 0x6f) != 0x00)
1995
                    return 1;
1996
                rn = VFP_SREG_N(insn);
1997
                if (insn & ARM_CP_RW_BIT) {
1998
                    /* vfp->arm */
1999
                    if (insn & (1 << 21)) {
2000
                        /* system register */
2001
                        rn >>= 1;
2002

    
2003
                        switch (rn) {
2004
                        case ARM_VFP_FPSID:
2005
                            /* VFP2 allows access to FPSID from userspace.
2006
                               VFP3 restricts all id registers to privileged
2007
                               accesses.  */
2008
                            if (IS_USER(s)
2009
                                && arm_feature(env, ARM_FEATURE_VFP3))
2010
                                return 1;
2011
                            gen_op_vfp_movl_T0_xreg(rn);
2012
                            break;
2013
                        case ARM_VFP_FPEXC:
2014
                            if (IS_USER(s))
2015
                                return 1;
2016
                            gen_op_vfp_movl_T0_xreg(rn);
2017
                            break;
2018
                        case ARM_VFP_FPINST:
2019
                        case ARM_VFP_FPINST2:
2020
                            /* Not present in VFP3.  */
2021
                            if (IS_USER(s)
2022
                                || arm_feature(env, ARM_FEATURE_VFP3))
2023
                                return 1;
2024
                            gen_op_vfp_movl_T0_xreg(rn);
2025
                            break;
2026
                        case ARM_VFP_FPSCR:
2027
                            if (rd == 15)
2028
                                gen_op_vfp_movl_T0_fpscr_flags();
2029
                            else
2030
                                gen_op_vfp_movl_T0_fpscr();
2031
                            break;
2032
                        case ARM_VFP_MVFR0:
2033
                        case ARM_VFP_MVFR1:
2034
                            if (IS_USER(s)
2035
                                || !arm_feature(env, ARM_FEATURE_VFP3))
2036
                                return 1;
2037
                            gen_op_vfp_movl_T0_xreg(rn);
2038
                            break;
2039
                        default:
2040
                            return 1;
2041
                        }
2042
                    } else {
2043
                        gen_mov_F0_vreg(0, rn);
2044
                        gen_op_vfp_mrs();
2045
                    }
2046
                    if (rd == 15) {
2047
                        /* Set the 4 flag bits in the CPSR.  */
2048
                        gen_op_movl_cpsr_T0(0xf0000000);
2049
                    } else
2050
                        gen_movl_reg_T0(s, rd);
2051
                } else {
2052
                    /* arm->vfp */
2053
                    gen_movl_T0_reg(s, rd);
2054
                    if (insn & (1 << 21)) {
2055
                        rn >>= 1;
2056
                        /* system register */
2057
                        switch (rn) {
2058
                        case ARM_VFP_FPSID:
2059
                        case ARM_VFP_MVFR0:
2060
                        case ARM_VFP_MVFR1:
2061
                            /* Writes are ignored.  */
2062
                            break;
2063
                        case ARM_VFP_FPSCR:
2064
                            gen_op_vfp_movl_fpscr_T0();
2065
                            gen_lookup_tb(s);
2066
                            break;
2067
                        case ARM_VFP_FPEXC:
2068
                            if (IS_USER(s))
2069
                                return 1;
2070
                            gen_op_vfp_movl_xreg_T0(rn);
2071
                            gen_lookup_tb(s);
2072
                            break;
2073
                        case ARM_VFP_FPINST:
2074
                        case ARM_VFP_FPINST2:
2075
                            gen_op_vfp_movl_xreg_T0(rn);
2076
                            break;
2077
                        default:
2078
                            return 1;
2079
                        }
2080
                    } else {
2081
                        gen_op_vfp_msr();
2082
                        gen_mov_vreg_F0(0, rn);
2083
                    }
2084
                }
2085
            }
2086
        } else {
2087
            /* data processing */
2088
            /* The opcode is in bits 23, 21, 20 and 6.  */
2089
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2090
            if (dp) {
2091
                if (op == 15) {
2092
                    /* rn is opcode */
2093
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2094
                } else {
2095
                    /* rn is register number */
2096
                    VFP_DREG_N(rn, insn);
2097
                }
2098

    
2099
                if (op == 15 && (rn == 15 || rn > 17)) {
2100
                    /* Integer or single precision destination.  */
2101
                    rd = VFP_SREG_D(insn);
2102
                } else {
2103
                    VFP_DREG_D(rd, insn);
2104
                }
2105

    
2106
                if (op == 15 && (rn == 16 || rn == 17)) {
2107
                    /* Integer source.  */
2108
                    rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2109
                } else {
2110
                    VFP_DREG_M(rm, insn);
2111
                }
2112
            } else {
2113
                rn = VFP_SREG_N(insn);
2114
                if (op == 15 && rn == 15) {
2115
                    /* Double precision destination.  */
2116
                    VFP_DREG_D(rd, insn);
2117
                } else {
2118
                    rd = VFP_SREG_D(insn);
2119
                }
2120
                rm = VFP_SREG_M(insn);
2121
            }
2122

    
2123
            veclen = env->vfp.vec_len;
2124
            if (op == 15 && rn > 3)
2125
                veclen = 0;
2126

    
2127
            /* Shut up compiler warnings.  */
2128
            delta_m = 0;
2129
            delta_d = 0;
2130
            bank_mask = 0;
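            /* VFP short-vector operations: the register file is split into
               banks of 8 single or 4 double registers.  A destination in
               the first bank makes the operation scalar; otherwise it is
               repeated vec_len times, stepping by vec_stride within the
               bank, and an Rm in the first bank is a scalar operand reused
               on every iteration.  */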
2131

    
2132
            if (veclen > 0) {
2133
                if (dp)
2134
                    bank_mask = 0xc;
2135
                else
2136
                    bank_mask = 0x18;
2137

    
2138
                /* Figure out what type of vector operation this is.  */
2139
                if ((rd & bank_mask) == 0) {
2140
                    /* scalar */
2141
                    veclen = 0;
2142
                } else {
2143
                    if (dp)
2144
                        delta_d = (env->vfp.vec_stride >> 1) + 1;
2145
                    else
2146
                        delta_d = env->vfp.vec_stride + 1;
2147

    
2148
                    if ((rm & bank_mask) == 0) {
2149
                        /* mixed scalar/vector */
2150
                        delta_m = 0;
2151
                    } else {
2152
                        /* vector */
2153
                        delta_m = delta_d;
2154
                    }
2155
                }
2156
            }
2157

    
2158
            /* Load the initial operands.  */
2159
            if (op == 15) {
2160
                switch (rn) {
2161
                case 16:
2162
                case 17:
2163
                    /* Integer source */
2164
                    gen_mov_F0_vreg(0, rm);
2165
                    break;
2166
                case 8:
2167
                case 9:
2168
                    /* Compare */
2169
                    gen_mov_F0_vreg(dp, rd);
2170
                    gen_mov_F1_vreg(dp, rm);
2171
                    break;
2172
                case 10:
2173
                case 11:
2174
                    /* Compare with zero */
2175
                    gen_mov_F0_vreg(dp, rd);
2176
                    gen_vfp_F1_ld0(dp);
2177
                    break;
2178
                case 20:
2179
                case 21:
2180
                case 22:
2181
                case 23:
2182
                    /* Source and destination the same.  */
2183
                    gen_mov_F0_vreg(dp, rd);
2184
                    break;
2185
                default:
2186
                    /* One source operand.  */
2187
                    gen_mov_F0_vreg(dp, rm);
2188
                    break;
2189
                }
2190
            } else {
2191
                /* Two source operands.  */
2192
                gen_mov_F0_vreg(dp, rn);
2193
                gen_mov_F1_vreg(dp, rm);
2194
            }
2195

    
2196
            for (;;) {
2197
                /* Perform the calculation.  */
2198
                switch (op) {
2199
                case 0: /* mac: fd + (fn * fm) */
2200
                    gen_vfp_mul(dp);
2201
                    gen_mov_F1_vreg(dp, rd);
2202
                    gen_vfp_add(dp);
2203
                    break;
2204
                case 1: /* nmac: fd - (fn * fm) */
2205
                    gen_vfp_mul(dp);
2206
                    gen_vfp_neg(dp);
2207
                    gen_mov_F1_vreg(dp, rd);
2208
                    gen_vfp_add(dp);
2209
                    break;
2210
                case 2: /* msc: -fd + (fn * fm) */
2211
                    gen_vfp_mul(dp);
2212
                    gen_mov_F1_vreg(dp, rd);
2213
                    gen_vfp_sub(dp);
2214
                    break;
2215
                case 3: /* nmsc: -fd - (fn * fm)  */
2216
                    gen_vfp_mul(dp);
2217
                    gen_mov_F1_vreg(dp, rd);
2218
                    gen_vfp_add(dp);
2219
                    gen_vfp_neg(dp);
2220
                    break;
2221
                case 4: /* mul: fn * fm */
2222
                    gen_vfp_mul(dp);
2223
                    break;
2224
                case 5: /* nmul: -(fn * fm) */
2225
                    gen_vfp_mul(dp);
2226
                    gen_vfp_neg(dp);
2227
                    break;
2228
                case 6: /* add: fn + fm */
2229
                    gen_vfp_add(dp);
2230
                    break;
2231
                case 7: /* sub: fn - fm */
2232
                    gen_vfp_sub(dp);
2233
                    break;
2234
                case 8: /* div: fn / fm */
2235
                    gen_vfp_div(dp);
2236
                    break;
2237
                case 14: /* fconst */
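                    /* The VFP3 8-bit immediate abcdefgh expands to
                       sign = a, exponent = NOT(b) followed by b replicated
                       (5 times for single, 8 for double) then cd, and
                       fraction = efgh padded with zeros.  */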
2238
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
2239
                      return 1;
2240

    
2241
                    n = (insn << 12) & 0x80000000;
2242
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
2243
                    if (dp) {
2244
                        if (i & 0x40)
2245
                            i |= 0x3f80;
2246
                        else
2247
                            i |= 0x4000;
2248
                        n |= i << 16;
2249
                    } else {
2250
                        if (i & 0x40)
2251
                            i |= 0x780;
2252
                        else
2253
                            i |= 0x800;
2254
                        n |= i << 19;
2255
                    }
2256
                    gen_vfp_fconst(dp, n);
2257
                    break;
2258
                case 15: /* extension space */
2259
                    switch (rn) {
2260
                    case 0: /* cpy */
2261
                        /* no-op */
2262
                        break;
2263
                    case 1: /* abs */
2264
                        gen_vfp_abs(dp);
2265
                        break;
2266
                    case 2: /* neg */
2267
                        gen_vfp_neg(dp);
2268
                        break;
2269
                    case 3: /* sqrt */
2270
                        gen_vfp_sqrt(dp);
2271
                        break;
2272
                    case 8: /* cmp */
2273
                        gen_vfp_cmp(dp);
2274
                        break;
2275
                    case 9: /* cmpe */
2276
                        gen_vfp_cmpe(dp);
2277
                        break;
2278
                    case 10: /* cmpz */
2279
                        gen_vfp_cmp(dp);
2280
                        break;
2281
                    case 11: /* cmpez */
2282
                        gen_vfp_F1_ld0(dp);
2283
                        gen_vfp_cmpe(dp);
2284
                        break;
2285
                    case 15: /* single<->double conversion */
2286
                        if (dp)
2287
                            gen_op_vfp_fcvtsd();
2288
                        else
2289
                            gen_op_vfp_fcvtds();
2290
                        break;
2291
                    case 16: /* fuito */
2292
                        gen_vfp_uito(dp);
2293
                        break;
2294
                    case 17: /* fsito */
2295
                        gen_vfp_sito(dp);
2296
                        break;
2297
                    case 20: /* fshto */
2298
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
2299
                          return 1;
2300
                        gen_vfp_shto(dp, rm);
2301
                        break;
2302
                    case 21: /* fslto */
2303
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
2304
                          return 1;
2305
                        gen_vfp_slto(dp, rm);
2306
                        break;
2307
                    case 22: /* fuhto */
2308
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
2309
                          return 1;
2310
                        gen_vfp_uhto(dp, rm);
2311
                        break;
2312
                    case 23: /* fulto */
2313
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
2314
                          return 1;
2315
                        gen_vfp_ulto(dp, rm);
2316
                        break;
2317
                    case 24: /* ftoui */
2318
                        gen_vfp_toui(dp);
2319
                        break;
2320
                    case 25: /* ftouiz */
2321
                        gen_vfp_touiz(dp);
2322
                        break;
2323
                    case 26: /* ftosi */
2324
                        gen_vfp_tosi(dp);
2325
                        break;
2326
                    case 27: /* ftosiz */
2327
                        gen_vfp_tosiz(dp);
2328
                        break;
2329
                    case 28: /* ftosh */
2330
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
2331
                          return 1;
2332
                        gen_vfp_tosh(dp, rm);
2333
                        break;
2334
                    case 29: /* ftosl */
2335
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
2336
                          return 1;
2337
                        gen_vfp_tosl(dp, rm);
2338
                        break;
2339
                    case 30: /* ftouh */
2340
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
2341
                          return 1;
2342
                        gen_vfp_touh(dp, rm);
2343
                        break;
2344
                    case 31: /* ftoul */
2345
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
2346
                          return 1;
2347
                        gen_vfp_toul(dp, rm);
2348
                        break;
2349
                    default: /* undefined */
2350
                        printf ("rn:%d\n", rn);
2351
                        return 1;
2352
                    }
2353
                    break;
2354
                default: /* undefined */
2355
                    printf ("op:%d\n", op);
2356
                    return 1;
2357
                }
2358

    
2359
                /* Write back the result.  */
2360
                if (op == 15 && (rn >= 8 && rn <= 11))
2361
                    ; /* Comparison, do nothing.  */
2362
                else if (op == 15 && rn > 17)
2363
                    /* Integer result.  */
2364
                    gen_mov_vreg_F0(0, rd);
2365
                else if (op == 15 && rn == 15)
2366
                    /* conversion */
2367
                    gen_mov_vreg_F0(!dp, rd);
2368
                else
2369
                    gen_mov_vreg_F0(dp, rd);
2370

    
2371
                /* Break out of the loop if we have finished.  */
2372
                if (veclen == 0)
2373
                    break;
2374

    
2375
                if (op == 15 && delta_m == 0) {
2376
                    /* single source one-many */
2377
                    while (veclen--) {
2378
                        rd = ((rd + delta_d) & (bank_mask - 1))
2379
                             | (rd & bank_mask);
2380
                        gen_mov_vreg_F0(dp, rd);
2381
                    }
2382
                    break;
2383
                }
2384
                /* Set up the next operands.  */
2385
                veclen--;
2386
                rd = ((rd + delta_d) & (bank_mask - 1))
2387
                     | (rd & bank_mask);
2388

    
2389
                if (op == 15) {
2390
                    /* One source operand.  */
2391
                    rm = ((rm + delta_m) & (bank_mask - 1))
2392
                         | (rm & bank_mask);
2393
                    gen_mov_F0_vreg(dp, rm);
2394
                } else {
2395
                    /* Two source operands.  */
2396
                    rn = ((rn + delta_d) & (bank_mask - 1))
2397
                         | (rn & bank_mask);
2398
                    gen_mov_F0_vreg(dp, rn);
2399
                    if (delta_m) {
2400
                        rm = ((rm + delta_m) & (bank_mask - 1))
2401
                             | (rm & bank_mask);
2402
                        gen_mov_F1_vreg(dp, rm);
2403
                    }
2404
                }
2405
            }
2406
        }
2407
        break;
2408
    case 0xc:
2409
    case 0xd:
2410
        if ((insn & 0x03e00000) == 0x00400000) {
2411
            /* two-register transfer */
2412
            rn = (insn >> 16) & 0xf;
2413
            rd = (insn >> 12) & 0xf;
2414
            if (dp) {
2415
                VFP_DREG_M(rm, insn);
2416
            } else {
2417
                rm = VFP_SREG_M(insn);
2418
            }
2419

    
2420
            if (insn & ARM_CP_RW_BIT) {
2421
                /* vfp->arm */
2422
                if (dp) {
2423
                    gen_mov_F0_vreg(1, rm);
2424
                    gen_op_vfp_mrrd();
2425
                    gen_movl_reg_T0(s, rd);
2426
                    gen_movl_reg_T1(s, rn);
2427
                } else {
2428
                    gen_mov_F0_vreg(0, rm);
2429
                    gen_op_vfp_mrs();
2430
                    gen_movl_reg_T0(s, rn);
2431
                    gen_mov_F0_vreg(0, rm + 1);
2432
                    gen_op_vfp_mrs();
2433
                    gen_movl_reg_T0(s, rd);
2434
                }
2435
            } else {
2436
                /* arm->vfp */
2437
                if (dp) {
2438
                    gen_movl_T0_reg(s, rd);
2439
                    gen_movl_T1_reg(s, rn);
2440
                    gen_op_vfp_mdrr();
2441
                    gen_mov_vreg_F0(1, rm);
2442
                } else {
2443
                    gen_movl_T0_reg(s, rn);
2444
                    gen_op_vfp_msr();
2445
                    gen_mov_vreg_F0(0, rm);
2446
                    gen_movl_T0_reg(s, rd);
2447
                    gen_op_vfp_msr();
2448
                    gen_mov_vreg_F0(0, rm + 1);
2449
                }
2450
            }
2451
        } else {
2452
            /* Load/store */
2453
            rn = (insn >> 16) & 0xf;
2454
            if (dp)
2455
                VFP_DREG_D(rd, insn);
2456
            else
2457
                rd = VFP_SREG_D(insn);
2458
            if (s->thumb && rn == 15) {
2459
                gen_op_movl_T1_im(s->pc & ~2);
2460
            } else {
2461
                gen_movl_T1_reg(s, rn);
2462
            }
2463
            if ((insn & 0x01200000) == 0x01000000) {
2464
                /* Single load/store */
2465
                offset = (insn & 0xff) << 2;
2466
                if ((insn & (1 << 23)) == 0)
2467
                    offset = -offset;
2468
                gen_op_addl_T1_im(offset);
2469
                if (insn & (1 << 20)) {
2470
                    gen_vfp_ld(s, dp);
2471
                    gen_mov_vreg_F0(dp, rd);
2472
                } else {
2473
                    gen_mov_F0_vreg(dp, rd);
2474
                    gen_vfp_st(s, dp);
2475
                }
2476
            } else {
2477
                /* load/store multiple */
2478
                if (dp)
2479
                    n = (insn >> 1) & 0x7f;
2480
                else
2481
                    n = insn & 0xff;
2482

    
2483
                if (insn & (1 << 24)) /* pre-decrement */
2484
                    gen_op_addl_T1_im(-((insn & 0xff) << 2));
2485

    
2486
                if (dp)
2487
                    offset = 8;
2488
                else
2489
                    offset = 4;
2490
                for (i = 0; i < n; i++) {
2491
                    if (insn & ARM_CP_RW_BIT) {
2492
                        /* load */
2493
                        gen_vfp_ld(s, dp);
2494
                        gen_mov_vreg_F0(dp, rd + i);
2495
                    } else {
2496
                        /* store */
2497
                        gen_mov_F0_vreg(dp, rd + i);
2498
                        gen_vfp_st(s, dp);
2499
                    }
2500
                    gen_op_addl_T1_im(offset);
2501
                }
2502
                if (insn & (1 << 21)) {
2503
                    /* writeback */
2504
                    if (insn & (1 << 24))
2505
                        offset = -offset * n;
2506
                    else if (dp && (insn & 1))
2507
                        offset = 4;
2508
                    else
2509
                        offset = 0;
2510

    
2511
                    if (offset != 0)
2512
                        gen_op_addl_T1_im(offset);
2513
                    gen_movl_reg_T1(s, rn);
2514
                }
2515
            }
2516
        }
2517
        break;
2518
    default:
2519
        /* Should never happen.  */
2520
        return 1;
2521
    }
2522
    return 0;
2523
}
2524

    
2525
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
2526
{
2527
    TranslationBlock *tb;
2528

    
2529
    tb = s->tb;
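    /* Direct block chaining: when the destination lies on the same guest
       page as this TB we emit a patchable jump and pass the link slot to
       exit_tb so cpu_exec can chain the blocks.  Cross-page branches always
       return to the main loop, which keeps chaining safe across MMU mapping
       changes.  */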
2530
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
2531
        tcg_gen_goto_tb(n);
2532
        gen_op_movl_T0_im(dest);
2533
        gen_set_pc_T0();
2534
        tcg_gen_exit_tb((long)tb + n);
2535
    } else {
2536
        gen_op_movl_T0_im(dest);
2537
        gen_set_pc_T0();
2538
        tcg_gen_exit_tb(0);
2539
    }
2540
}
2541

    
2542
static inline void gen_jmp (DisasContext *s, uint32_t dest)
2543
{
2544
    if (__builtin_expect(s->singlestep_enabled, 0)) {
2545
        /* An indirect jump so that we still trigger the debug exception.  */
2546
        if (s->thumb)
2547
          dest |= 1;
2548
        gen_op_movl_T0_im(dest);
2549
        gen_bx(s);
2550
    } else {
2551
        gen_goto_tb(s, 0, dest);
2552
        s->is_jmp = DISAS_TB_JUMP;
2553
    }
2554
}
2555

    
2556
static inline void gen_mulxy(int x, int y)
2557
{
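    /* 16x16->32 signed multiply as used by SMULxy/SMLAxy: x and y select
       the top (1) or bottom (0) halfword of each operand.  */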
2558
    if (x)
2559
        tcg_gen_sari_i32(cpu_T[0], cpu_T[0], 16);
2560
    else
2561
        gen_sxth(cpu_T[0]);
2562
    if (y)
2563
        gen_op_sarl_T1_im(16);
2564
    else
2565
        gen_sxth(cpu_T[1]);
2566
    gen_op_mul_T0_T1();
2567
}
2568

    
2569
/* Return the mask of PSR bits set by a MSR instruction.  */
2570
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr)
{
2571
    uint32_t mask;
2572

    
2573
    mask = 0;
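    /* Bits 0-3 of 'flags' are the MSR field mask {c,x,s,f}, selecting the
       control, extension, status and flags bytes of the PSR.  */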
2574
    if (flags & (1 << 0))
2575
        mask |= 0xff;
2576
    if (flags & (1 << 1))
2577
        mask |= 0xff00;
2578
    if (flags & (1 << 2))
2579
        mask |= 0xff0000;
2580
    if (flags & (1 << 3))
2581
        mask |= 0xff000000;
2582

    
2583
    /* Mask out undefined bits.  */
2584
    mask &= ~CPSR_RESERVED;
2585
    if (!arm_feature(env, ARM_FEATURE_V6))
2586
        mask &= ~(CPSR_E | CPSR_GE);
2587
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
2588
        mask &= ~CPSR_IT;
2589
    /* Mask out execution state bits.  */
2590
    if (!spsr)
2591
        mask &= ~CPSR_EXEC;
2592
    /* Mask out privileged bits.  */
2593
    if (IS_USER(s))
2594
        mask &= CPSR_USER;
2595
    return mask;
2596
}
2597

    
2598
/* Returns nonzero if access to the PSR is not permitted.  */
2599
static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
2600
{
2601
    if (spsr) {
2602
        /* ??? This is also undefined in system mode.  */
2603
        if (IS_USER(s))
2604
            return 1;
2605
        gen_op_movl_spsr_T0(mask);
2606
    } else {
2607
        gen_op_movl_cpsr_T0(mask);
2608
    }
2609
    gen_lookup_tb(s);
2610
    return 0;
2611
}
2612

    
2613
/* Generate an old-style exception return.  */
2614
static void gen_exception_return(DisasContext *s)
2615
{
2616
    gen_set_pc_T0();
2617
    gen_op_movl_T0_spsr();
2618
    gen_op_movl_cpsr_T0(0xffffffff);
2619
    s->is_jmp = DISAS_UPDATE;
2620
}
2621

    
2622
/* Generate a v6 exception return.  */
2623
static void gen_rfe(DisasContext *s)
2624
{
2625
    gen_op_movl_cpsr_T0(0xffffffff);
2626
    gen_op_movl_T0_T2();
2627
    gen_set_pc_T0();
2628
    s->is_jmp = DISAS_UPDATE;
2629
}
2630

    
2631
static inline void
2632
gen_set_condexec (DisasContext *s)
2633
{
2634
    if (s->condexec_mask) {
2635
        gen_op_set_condexec((s->condexec_cond << 4) | (s->condexec_mask >> 1));
2636
    }
2637
}
2638

    
2639
static void gen_nop_hint(DisasContext *s, int val)
2640
{
2641
    switch (val) {
2642
    case 3: /* wfi */
2643
        gen_op_movl_T0_im((long)s->pc);
2644
        gen_set_pc_T0();
2645
        s->is_jmp = DISAS_WFI;
2646
        break;
2647
    case 2: /* wfe */
2648
    case 4: /* sev */
2649
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
2650
    default: /* nop */
2651
        break;
2652
    }
2653
}
2654

    
2655
/* Neon shift by constant.  The actual ops are the same as used for variable
2656
   shifts.  [OP][U][SIZE]  */
2657
static GenOpFunc *gen_neon_shift_im[8][2][4] = {
2658
    { /* 0 */ /* VSHR */
2659
      {
2660
        gen_op_neon_shl_u8,
2661
        gen_op_neon_shl_u16,
2662
        gen_op_neon_shl_u32,
2663
        gen_op_neon_shl_u64
2664
      }, {
2665
        gen_op_neon_shl_s8,
2666
        gen_op_neon_shl_s16,
2667
        gen_op_neon_shl_s32,
2668
        gen_op_neon_shl_s64
2669
      }
2670
    }, { /* 1 */ /* VSRA */
2671
      {
2672
        gen_op_neon_shl_u8,
2673
        gen_op_neon_shl_u16,
2674
        gen_op_neon_shl_u32,
2675
        gen_op_neon_shl_u64
2676
      }, {
2677
        gen_op_neon_shl_s8,
2678
        gen_op_neon_shl_s16,
2679
        gen_op_neon_shl_s32,
2680
        gen_op_neon_shl_s64
2681
      }
2682
    }, { /* 2 */ /* VRSHR */
2683
      {
2684
        gen_op_neon_rshl_u8,
2685
        gen_op_neon_rshl_u16,
2686
        gen_op_neon_rshl_u32,
2687
        gen_op_neon_rshl_u64
2688
      }, {
2689
        gen_op_neon_rshl_s8,
2690
        gen_op_neon_rshl_s16,
2691
        gen_op_neon_rshl_s32,
2692
        gen_op_neon_rshl_s64
2693
      }
2694
    }, { /* 3 */ /* VRSRA */
2695
      {
2696
        gen_op_neon_rshl_u8,
2697
        gen_op_neon_rshl_u16,
2698
        gen_op_neon_rshl_u32,
2699
        gen_op_neon_rshl_u64
2700
      }, {
2701
        gen_op_neon_rshl_s8,
2702
        gen_op_neon_rshl_s16,
2703
        gen_op_neon_rshl_s32,
2704
        gen_op_neon_rshl_s64
2705
      }
2706
    }, { /* 4 */
2707
      {
2708
        NULL, NULL, NULL, NULL
2709
      }, { /* VSRI */
2710
        gen_op_neon_shl_u8,
2711
        gen_op_neon_shl_u16,
2712
        gen_op_neon_shl_u32,
2713
        gen_op_neon_shl_u64,
2714
      }
2715
    }, { /* 5 */
2716
      { /* VSHL */
2717
        gen_op_neon_shl_u8,
2718
        gen_op_neon_shl_u16,
2719
        gen_op_neon_shl_u32,
2720
        gen_op_neon_shl_u64,
2721
      }, { /* VSLI */
2722
        gen_op_neon_shl_u8,
2723
        gen_op_neon_shl_u16,
2724
        gen_op_neon_shl_u32,
2725
        gen_op_neon_shl_u64,
2726
      }
2727
    }, { /* 6 */ /* VQSHL */
2728
      {
2729
        gen_op_neon_qshl_u8,
2730
        gen_op_neon_qshl_u16,
2731
        gen_op_neon_qshl_u32,
2732
        gen_op_neon_qshl_u64
2733
      }, {
2734
        gen_op_neon_qshl_s8,
2735
        gen_op_neon_qshl_s16,
2736
        gen_op_neon_qshl_s32,
2737
        gen_op_neon_qshl_s64
2738
      }
2739
    }, { /* 7 */ /* VQSHLU */
2740
      {
2741
        gen_op_neon_qshl_u8,
2742
        gen_op_neon_qshl_u16,
2743
        gen_op_neon_qshl_u32,
2744
        gen_op_neon_qshl_u64
2745
      }, {
2746
        gen_op_neon_qshl_u8,
2747
        gen_op_neon_qshl_u16,
2748
        gen_op_neon_qshl_u32,
2749
        gen_op_neon_qshl_u64
2750
      }
2751
    }
2752
};
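/* Right-shift forms (VSHR, VSRA, VRSHR, VRSRA, VSRI) reuse the left-shift
   helpers above: the immediate decoder passes a negative shift count, and,
   as with register-specified shifts, a negative count shifts right.  */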
2753

    
2754
/* [R][U][size - 1] */
2755
static GenOpFunc *gen_neon_shift_im_narrow[2][2][3] = {
2756
    {
2757
      {
2758
        gen_op_neon_shl_u16,
2759
        gen_op_neon_shl_u32,
2760
        gen_op_neon_shl_u64
2761
      }, {
2762
        gen_op_neon_shl_s16,
2763
        gen_op_neon_shl_s32,
2764
        gen_op_neon_shl_s64
2765
      }
2766
    }, {
2767
      {
2768
        gen_op_neon_rshl_u16,
2769
        gen_op_neon_rshl_u32,
2770
        gen_op_neon_rshl_u64
2771
      }, {
2772
        gen_op_neon_rshl_s16,
2773
        gen_op_neon_rshl_s32,
2774
        gen_op_neon_rshl_s64
2775
      }
2776
    }
2777
};
2778

    
2779
static inline void
2780
gen_op_neon_narrow_u32(void)
2781
{
2782
    /* No-op.  */
2783
}
2784

    
2785
static GenOpFunc *gen_neon_narrow[3] = {
2786
    gen_op_neon_narrow_u8,
2787
    gen_op_neon_narrow_u16,
2788
    gen_op_neon_narrow_u32
2789
};
2790

    
2791
static GenOpFunc *gen_neon_narrow_satu[3] = {
2792
    gen_op_neon_narrow_sat_u8,
2793
    gen_op_neon_narrow_sat_u16,
2794
    gen_op_neon_narrow_sat_u32
2795
};
2796

    
2797
static GenOpFunc *gen_neon_narrow_sats[3] = {
2798
    gen_op_neon_narrow_sat_s8,
2799
    gen_op_neon_narrow_sat_s16,
2800
    gen_op_neon_narrow_sat_s32
2801
};
2802

    
2803
static inline int gen_neon_add(int size)
2804
{
2805
    switch (size) {
2806
    case 0: gen_op_neon_add_u8(); break;
2807
    case 1: gen_op_neon_add_u16(); break;
2808
    case 2: gen_op_addl_T0_T1(); break;
2809
    default: return 1;
2810
    }
2811
    return 0;
2812
}
2813

    
2814
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
2815
#define gen_op_neon_pmax_s32  gen_op_neon_max_s32
2816
#define gen_op_neon_pmax_u32  gen_op_neon_max_u32
2817
#define gen_op_neon_pmin_s32  gen_op_neon_min_s32
2818
#define gen_op_neon_pmin_u32  gen_op_neon_min_u32
2819

    
2820
#define GEN_NEON_INTEGER_OP(name) do { \
2821
    switch ((size << 1) | u) { \
2822
    case 0: gen_op_neon_##name##_s8(); break; \
2823
    case 1: gen_op_neon_##name##_u8(); break; \
2824
    case 2: gen_op_neon_##name##_s16(); break; \
2825
    case 3: gen_op_neon_##name##_u16(); break; \
2826
    case 4: gen_op_neon_##name##_s32(); break; \
2827
    case 5: gen_op_neon_##name##_u32(); break; \
2828
    default: return 1; \
2829
    }} while (0)
2830

    
2831
static inline void
2832
gen_neon_movl_scratch_T0(int scratch)
2833
{
2834
  uint32_t offset;
2835

    
2836
  offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2837
  gen_op_neon_setreg_T0(offset);
2838
}
2839

    
2840
static inline void
2841
gen_neon_movl_scratch_T1(int scratch)
2842
{
2843
  uint32_t offset;
2844

    
2845
  offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2846
  gen_op_neon_setreg_T1(offset);
2847
}
2848

    
2849
static inline void
2850
gen_neon_movl_T0_scratch(int scratch)
2851
{
2852
  uint32_t offset;
2853

    
2854
  offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2855
  gen_op_neon_getreg_T0(offset);
2856
}
2857

    
2858
static inline void
2859
gen_neon_movl_T1_scratch(int scratch)
2860
{
2861
  uint32_t offset;
2862

    
2863
  offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2864
  gen_op_neon_getreg_T1(offset);
2865
}
2866

    
2867
static inline void gen_op_neon_widen_u32(void)
2868
{
2869
    gen_op_movl_T1_im(0);
2870
}
2871

    
2872
static inline void gen_neon_get_scalar(int size, int reg)
2873
{
2874
    if (size == 1) {
2875
        NEON_GET_REG(T0, reg >> 1, reg & 1);
2876
    } else {
2877
        NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
2878
        if (reg & 1)
2879
            gen_op_neon_dup_low16();
2880
        else
2881
            gen_op_neon_dup_high16();
2882
    }
2883
}
2884

    
2885
static void gen_neon_unzip(int reg, int q, int tmp, int size)
2886
{
2887
    int n;
2888

    
2889
    for (n = 0; n < q + 1; n += 2) {
2890
        NEON_GET_REG(T0, reg, n);
2891
        NEON_GET_REG(T1, reg, n + 1);
2892
        switch (size) {
2893
        case 0: gen_op_neon_unzip_u8(); break;
2894
        case 1: gen_op_neon_zip_u16(); break; /* zip and unzip are the same.  */
2895
        case 2: /* no-op */; break;
2896
        default: abort();
2897
        }
2898
        gen_neon_movl_scratch_T0(tmp + n);
2899
        gen_neon_movl_scratch_T1(tmp + n + 1);
2900
    }
2901
}
2902

    
2903
static struct {
2904
    int nregs;
2905
    int interleave;
2906
    int spacing;
2907
} neon_ls_element_type[11] = {
2908
    {4, 4, 1},
2909
    {4, 4, 2},
2910
    {4, 1, 1},
2911
    {4, 2, 1},
2912
    {3, 3, 1},
2913
    {3, 3, 2},
2914
    {3, 1, 1},
2915
    {1, 1, 1},
2916
    {2, 2, 1},
2917
    {2, 2, 2},
2918
    {2, 1, 1}
2919
};
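/* The table above is indexed by the 'op' field (bits [11:8]) of a Neon
   "load/store multiple structures" instruction and gives the number of
   registers transferred, the element interleave factor and the register
   spacing of each form.  */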
2920

    
2921
/* Translate a NEON load/store element instruction.  Return nonzero if the
2922
   instruction is invalid.  */
2923
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
2924
{
2925
    int rd, rn, rm;
2926
    int op;
2927
    int nregs;
2928
    int interleave;
2929
    int stride;
2930
    int size;
2931
    int reg;
2932
    int pass;
2933
    int load;
2934
    int shift;
2935
    uint32_t mask;
2936
    int n;
2937

    
2938
    if (!vfp_enabled(env))
2939
      return 1;
2940
    VFP_DREG_D(rd, insn);
2941
    rn = (insn >> 16) & 0xf;
2942
    rm = insn & 0xf;
2943
    load = (insn & (1 << 21)) != 0;
2944
    if ((insn & (1 << 23)) == 0) {
2945
        /* Load/store all elements.  */
2946
        op = (insn >> 8) & 0xf;
2947
        size = (insn >> 6) & 3;
2948
        if (op > 10 || size == 3)
2949
            return 1;
2950
        nregs = neon_ls_element_type[op].nregs;
2951
        interleave = neon_ls_element_type[op].interleave;
2952
        gen_movl_T1_reg(s, rn);
2953
        stride = (1 << size) * interleave;
2954
        for (reg = 0; reg < nregs; reg++) {
2955
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
2956
                gen_movl_T1_reg(s, rn);
2957
                gen_op_addl_T1_im((1 << size) * reg);
2958
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
2959
                gen_movl_T1_reg(s, rn);
2960
                gen_op_addl_T1_im(1 << size);
2961
            }
2962
            for (pass = 0; pass < 2; pass++) {
2963
                if (size == 2) {
2964
                    if (load) {
2965
                        gen_ldst(ldl, s);
2966
                        NEON_SET_REG(T0, rd, pass);
2967
                    } else {
2968
                        NEON_GET_REG(T0, rd, pass);
2969
                        gen_ldst(stl, s);
2970
                    }
2971
                    gen_op_addl_T1_im(stride);
2972
                } else if (size == 1) {
2973
                    if (load) {
2974
                        gen_ldst(lduw, s);
2975
                        gen_op_addl_T1_im(stride);
2976
                        gen_op_movl_T2_T0();
2977
                        gen_ldst(lduw, s);
2978
                        gen_op_addl_T1_im(stride);
2979
                        gen_op_neon_insert_elt(16, 0xffff);
2980
                        NEON_SET_REG(T2, rd, pass);
2981
                    } else {
2982
                        NEON_GET_REG(T2, rd, pass);
2983
                        gen_op_movl_T0_T2();
2984
                        gen_ldst(stw, s);
2985
                        gen_op_addl_T1_im(stride);
2986
                        gen_op_neon_extract_elt(16, 0xffff0000);
2987
                        gen_ldst(stw, s);
2988
                        gen_op_addl_T1_im(stride);
2989
                    }
2990
                } else /* size == 0 */ {
2991
                    if (load) {
2992
                        mask = 0xff;
2993
                        for (n = 0; n < 4; n++) {
2994
                            gen_ldst(ldub, s);
2995
                            gen_op_addl_T1_im(stride);
2996
                            if (n == 0) {
2997
                                gen_op_movl_T2_T0();
2998
                            } else {
2999
                                gen_op_neon_insert_elt(n * 8, ~mask);
3000
                            }
3001
                            mask <<= 8;
3002
                        }
3003
                        NEON_SET_REG(T2, rd, pass);
3004
                    } else {
3005
                        NEON_GET_REG(T2, rd, pass);
3006
                        mask = 0xff;
3007
                        for (n = 0; n < 4; n++) {
3008
                            if (n == 0) {
3009
                                gen_op_movl_T0_T2();
3010
                            } else {
3011
                                gen_op_neon_extract_elt(n * 8, mask);
3012
                            }
3013
                            gen_ldst(stb, s);
3014
                            gen_op_addl_T1_im(stride);
3015
                            mask <<= 8;
3016
                        }
3017
                    }
3018
                }
3019
            }
3020
            rd += neon_ls_element_type[op].spacing;
3021
        }
3022
        stride = nregs * 8;
3023
    } else {
3024
        size = (insn >> 10) & 3;
3025
        if (size == 3) {
3026
            /* Load single element to all lanes.  */
3027
            if (!load)
3028
                return 1;
3029
            size = (insn >> 6) & 3;
3030
            nregs = ((insn >> 8) & 3) + 1;
3031
            stride = (insn & (1 << 5)) ? 2 : 1;
3032
            gen_movl_T1_reg(s, rn);
3033
            for (reg = 0; reg < nregs; reg++) {
3034
                switch (size) {
3035
                case 0:
3036
                    gen_ldst(ldub, s);
3037
                    gen_op_neon_dup_u8(0);
3038
                    break;
3039
                case 1:
3040
                    gen_ldst(lduw, s);
3041
                    gen_op_neon_dup_low16();
3042
                    break;
3043
                case 2:
3044
                    gen_ldst(ldl, s);
3045
                    break;
3046
                case 3:
3047
                    return 1;
3048
                }
3049
                gen_op_addl_T1_im(1 << size);
3050
                NEON_SET_REG(T0, rd, 0);
3051
                NEON_SET_REG(T0, rd, 1);
3052
                rd += stride;
3053
            }
3054
            stride = (1 << size) * nregs;
3055
        } else {
3056
            /* Single element.  */
3057
            pass = (insn >> 7) & 1;
3058
            switch (size) {
3059
            case 0:
3060
                shift = ((insn >> 5) & 3) * 8;
3061
                mask = 0xff << shift;
3062
                stride = 1;
3063
                break;
3064
            case 1:
3065
                shift = ((insn >> 6) & 1) * 16;
3066
                mask = shift ? 0xffff0000 : 0xffff;
3067
                stride = (insn & (1 << 5)) ? 2 : 1;
3068
                break;
3069
            case 2:
3070
                shift = 0;
3071
                mask = 0xffffffff;
3072
                stride = (insn & (1 << 6)) ? 2 : 1;
3073
                break;
3074
            default:
3075
                abort();
3076
            }
3077
            nregs = ((insn >> 8) & 3) + 1;
3078
            gen_movl_T1_reg(s, rn);
3079
            for (reg = 0; reg < nregs; reg++) {
3080
                if (load) {
3081
                    if (size != 2) {
3082
                        NEON_GET_REG(T2, rd, pass);
3083
                    }
3084
                    switch (size) {
3085
                    case 0:
3086
                        gen_ldst(ldub, s);
3087
                        break;
3088
                    case 1:
3089
                        gen_ldst(lduw, s);
3090
                        break;
3091
                    case 2:
3092
                        gen_ldst(ldl, s);
3093
                        NEON_SET_REG(T0, rd, pass);
3094
                        break;
3095
                    }
3096
                    if (size != 2) {
3097
                        gen_op_neon_insert_elt(shift, ~mask);
3098
                        NEON_SET_REG(T0, rd, pass);
3099
                    }
3100
                } else { /* Store */
3101
                    if (size == 2) {
3102
                        NEON_GET_REG(T0, rd, pass);
3103
                    } else {
3104
                        NEON_GET_REG(T2, rd, pass);
3105
                        gen_op_neon_extract_elt(shift, mask);
3106
                    }
3107
                    switch (size) {
3108
                    case 0:
3109
                        gen_ldst(stb, s);
3110
                        break;
3111
                    case 1:
3112
                        gen_ldst(stw, s);
3113
                        break;
3114
                    case 2:
3115
                        gen_ldst(stl, s);
3116
                        break;
3117
                    }
3118
                }
3119
                rd += stride;
3120
                gen_op_addl_T1_im(1 << size);
3121
            }
3122
            stride = nregs * (1 << size);
3123
        }
3124
    }
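    /* Post-indexed addressing: Rm == 15 means no writeback, Rm == 13 writes
       back the total transfer size computed above, and any other Rm is added
       to Rn as a register offset.  */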
3125
    if (rm != 15) {
3126
        TCGv base;
3127

    
3128
        base = load_reg(s, rn);
3129
        if (rm == 13) {
3130
            tcg_gen_addi_i32(base, base, stride);
3131
        } else {
3132
            TCGv index;
3133
            index = load_reg(s, rm);
3134
            tcg_gen_add_i32(base, base, index);
3135
            dead_tmp(index);
3136
        }
3137
        store_reg(s, rn, base);
3138
    }
3139
    return 0;
3140
}
3141

    
3142
/* Translate a NEON data processing instruction.  Return nonzero if the
3143
   instruction is invalid.
3144
   In general we process vectors in 32-bit chunks.  This means we can reuse
3145
   some of the scalar ops, and hopefully the code generated for 32-bit
3146
   hosts won't be too awful.  The downside is that the few 64-bit operations
3147
   (mainly shifts) get complicated.  */
3148

    
3149
static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
3150
{
3151
    int op;
3152
    int q;
3153
    int rd, rn, rm;
3154
    int size;
3155
    int shift;
3156
    int pass;
3157
    int count;
3158
    int pairwise;
3159
    int u;
3160
    int n;
3161
    uint32_t imm;
3162

    
3163
    if (!vfp_enabled(env))
3164
      return 1;
3165
    q = (insn & (1 << 6)) != 0;
3166
    u = (insn >> 24) & 1;
3167
    VFP_DREG_D(rd, insn);
3168
    VFP_DREG_N(rn, insn);
3169
    VFP_DREG_M(rm, insn);
3170
    size = (insn >> 20) & 3;
3171
    if ((insn & (1 << 23)) == 0) {
3172
        /* Three register same length.  */
3173
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
3174
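        /* 64-bit element VQADD/VQSUB/VADD/VSUB are special-cased: the rm
           operand pair is staged in the scratch area, then the rn pair is
           loaded into {T0, T1}.  */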
        if (size == 3 && (op == 1 || op == 5 || op == 16)) {
3175
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
3176
                NEON_GET_REG(T0, rm, pass * 2);
3177
                NEON_GET_REG(T1, rm, pass * 2 + 1);
3178
                gen_neon_movl_scratch_T0(0);
3179
                gen_neon_movl_scratch_T1(1);
3180
                NEON_GET_REG(T0, rn, pass * 2);
3181
                NEON_GET_REG(T1, rn, pass * 2 + 1);
3182
                switch (op) {
3183
                case 1: /* VQADD */
3184
                    if (u) {
3185
                        gen_op_neon_addl_saturate_u64();
3186
                    } else {
3187
                        gen_op_neon_addl_saturate_s64();
3188
                    }
3189
                    break;
3190
                case 5: /* VQSUB */
3191
                    if (u) {
3192
                        gen_op_neon_subl_saturate_u64();
3193
                    } else {
3194
                        gen_op_neon_subl_saturate_s64();
3195
                    }
3196
                    break;
3197
                case 16:
3198
                    if (u) {
3199
                        gen_op_neon_subl_u64();
3200
                    } else {
3201
                        gen_op_neon_addl_u64();
3202
                    }
3203
                    break;
3204
                default:
3205
                    abort();
3206
                }
3207
                NEON_SET_REG(T0, rd, pass * 2);
3208
                NEON_SET_REG(T1, rd, pass * 2 + 1);
3209
            }
3210
            return 0;
3211
        }
3212
        switch (op) {
3213
        case 8: /* VSHL */
3214
        case 9: /* VQSHL */
3215
        case 10: /* VRSHL */
3216
        case 11: /* VQRSHL */
3217
            /* Shift operations have Rn and Rm reversed.  */
3218
            {
3219
                int tmp;
3220
                tmp = rn;
3221
                rn = rm;
3222
                rm = tmp;
3223
                pairwise = 0;
3224
            }
3225
            break;
3226
        case 20: /* VPMAX */
3227
        case 21: /* VPMIN */
3228
        case 23: /* VPADD */
3229
            pairwise = 1;
3230
            break;
3231
        case 26: /* VPADD (float) */
3232
            pairwise = (u && size < 2);
3233
            break;
3234
        case 30: /* VPMIN/VPMAX (float) */
3235
            pairwise = u;
3236
            break;
3237
        default:
3238
            pairwise = 0;
3239
            break;
3240
        }
3241
        for (pass = 0; pass < (q ? 4 : 2); pass++) {
3242

    
3243
        if (pairwise) {
3244
            /* Pairwise.  */
3245
            if (q)
3246
                n = (pass & 1) * 2;
3247
            else
3248
                n = 0;
3249
            if (pass < q + 1) {
3250
                NEON_GET_REG(T0, rn, n);
3251
                NEON_GET_REG(T1, rn, n + 1);
3252
            } else {
3253
                NEON_GET_REG(T0, rm, n);
3254
                NEON_GET_REG(T1, rm, n + 1);
3255
            }
3256
        } else {
3257
            /* Elementwise.  */
3258
            NEON_GET_REG(T0, rn, pass);
3259
            NEON_GET_REG(T1, rm, pass);
3260
        }
3261
        switch (op) {
3262
        case 0: /* VHADD */
3263
            GEN_NEON_INTEGER_OP(hadd);
3264
            break;
3265
        case 1: /* VQADD */
3266
            switch ((size << 1) | u) {
3267
            case 0: gen_op_neon_qadd_s8(); break;
3268
            case 1: gen_op_neon_qadd_u8(); break;
3269
            case 2: gen_op_neon_qadd_s16(); break;
3270
            case 3: gen_op_neon_qadd_u16(); break;
3271
            case 4: gen_op_addl_T0_T1_saturate(); break;
3272
            case 5: gen_op_addl_T0_T1_usaturate(); break;
3273
            default: abort();
3274
            }
3275
            break;
3276
        case 2: /* VRHADD */
3277
            GEN_NEON_INTEGER_OP(rhadd);
3278
            break;
3279
        case 3: /* Logic ops.  */
3280
            switch ((u << 2) | size) {
3281
            case 0: /* VAND */
3282
                gen_op_andl_T0_T1();
3283
                break;
3284
            case 1: /* BIC */
3285
                gen_op_bicl_T0_T1();
3286
                break;
3287
            case 2: /* VORR */
3288
                gen_op_orl_T0_T1();
3289
                break;
3290
            case 3: /* VORN */
3291
                gen_op_notl_T1();
3292
                gen_op_orl_T0_T1();
3293
                break;
3294
            case 4: /* VEOR */
3295
                gen_op_xorl_T0_T1();
3296
                break;
3297
            case 5: /* VBSL */
3298
                NEON_GET_REG(T2, rd, pass);
3299
                gen_op_neon_bsl();
3300
                break;
3301
            case 6: /* VBIT */
3302
                NEON_GET_REG(T2, rd, pass);
3303
                gen_op_neon_bit();
3304
                break;
3305
            case 7: /* VBIF */
3306
                NEON_GET_REG(T2, rd, pass);
3307
                gen_op_neon_bif();
3308
                break;
3309
            }
3310
            break;
3311
        case 4: /* VHSUB */
3312
            GEN_NEON_INTEGER_OP(hsub);
3313
            break;
3314
        case 5: /* VQSUB */
3315
            switch ((size << 1) | u) {
3316
            case 0: gen_op_neon_qsub_s8(); break;
3317
            case 1: gen_op_neon_qsub_u8(); break;
3318
            case 2: gen_op_neon_qsub_s16(); break;
3319
            case 3: gen_op_neon_qsub_u16(); break;
3320
            case 4: gen_op_subl_T0_T1_saturate(); break;
3321
            case 5: gen_op_subl_T0_T1_usaturate(); break;
3322
            default: abort();
3323
            }
3324
            break;
3325
        case 6: /* VCGT */
3326
            GEN_NEON_INTEGER_OP(cgt);
3327
            break;
3328
        case 7: /* VCGE */
3329
            GEN_NEON_INTEGER_OP(cge);
3330
            break;
3331
        case 8: /* VSHL */
3332
            switch ((size << 1) | u) {
3333
            case 0: gen_op_neon_shl_s8(); break;
3334
            case 1: gen_op_neon_shl_u8(); break;
3335
            case 2: gen_op_neon_shl_s16(); break;
3336
            case 3: gen_op_neon_shl_u16(); break;
3337
            case 4: gen_op_neon_shl_s32(); break;
3338
            case 5: gen_op_neon_shl_u32(); break;
3339
#if 0
3340
            /* ??? Implementing these is tricky because the vector ops work
3341
               on 32-bit pieces.  */
3342
            case 6: gen_op_neon_shl_s64(); break;
3343
            case 7: gen_op_neon_shl_u64(); break;
3344
#else
3345
            case 6: case 7: cpu_abort(env, "VSHL.64 not implemented");
3346
#endif
3347
            }
3348
            break;
3349
        case 9: /* VQSHL */
3350
            switch ((size << 1) | u) {
3351
            case 0: gen_op_neon_qshl_s8(); break;
3352
            case 1: gen_op_neon_qshl_u8(); break;
3353
            case 2: gen_op_neon_qshl_s16(); break;
3354
            case 3: gen_op_neon_qshl_u16(); break;
3355
            case 4: gen_op_neon_qshl_s32(); break;
3356
            case 5: gen_op_neon_qshl_u32(); break;
3357
#if 0
3358
            /* ??? Implementing these is tricky because the vector ops work
3359
               on 32-bit pieces.  */
3360
            case 6: gen_op_neon_qshl_s64(); break;
3361
            case 7: gen_op_neon_qshl_u64(); break;
3362
#else
3363
            case 6: case 7: cpu_abort(env, "VQSHL.64 not implemented");
3364
#endif
3365
            }
3366
            break;
3367
        case 10: /* VRSHL */
3368
            switch ((size << 1) | u) {
3369
            case 0: gen_op_neon_rshl_s8(); break;
3370
            case 1: gen_op_neon_rshl_u8(); break;
3371
            case 2: gen_op_neon_rshl_s16(); break;
3372
            case 3: gen_op_neon_rshl_u16(); break;
3373
            case 4: gen_op_neon_rshl_s32(); break;
3374
            case 5: gen_op_neon_rshl_u32(); break;
3375
#if 0
3376
            /* ??? Implementing these is tricky because the vector ops work
3377
               on 32-bit pieces.  */
3378
            case 6: gen_op_neon_rshl_s64(); break;
3379
            case 7: gen_op_neon_rshl_u64(); break;
3380
#else
3381
            case 6: case 7: cpu_abort(env, "VRSHL.64 not implemented");
3382
#endif
3383
            }
3384
            break;
3385
        case 11: /* VQRSHL */
3386
            switch ((size << 1) | u) {
3387
            case 0: gen_op_neon_qrshl_s8(); break;
3388
            case 1: gen_op_neon_qrshl_u8(); break;
3389
            case 2: gen_op_neon_qrshl_s16(); break;
3390
            case 3: gen_op_neon_qrshl_u16(); break;
3391
            case 4: gen_op_neon_qrshl_s32(); break;
3392
            case 5: gen_op_neon_qrshl_u32(); break;
3393
#if 0
3394
            /* ??? Implementing these is tricky because the vector ops work
3395
               on 32-bit pieces.  */
3396
            case 6: gen_op_neon_qrshl_s64(); break;
3397
            case 7: gen_op_neon_qrshl_u64(); break;
3398
#else
3399
            case 6: case 7: cpu_abort(env, "VQRSHL.64 not implemented");
3400
#endif
3401
            }
3402
            break;
3403
        case 12: /* VMAX */
3404
            GEN_NEON_INTEGER_OP(max);
3405
            break;
3406
        case 13: /* VMIN */
3407
            GEN_NEON_INTEGER_OP(min);
3408
            break;
3409
        case 14: /* VABD */
3410
            GEN_NEON_INTEGER_OP(abd);
3411
            break;
3412
        case 15: /* VABA */
3413
            GEN_NEON_INTEGER_OP(abd);
3414
            NEON_GET_REG(T1, rd, pass);
3415
            gen_neon_add(size);
3416
            break;
3417
        case 16:
3418
            if (!u) { /* VADD */
3419
                if (gen_neon_add(size))
3420
                    return 1;
3421
            } else { /* VSUB */
3422
                switch (size) {
3423
                case 0: gen_op_neon_sub_u8(); break;
3424
                case 1: gen_op_neon_sub_u16(); break;
3425
                case 2: gen_op_subl_T0_T1(); break;
3426
                default: return 1;
3427
                }
3428
            }
3429
            break;
3430
        case 17:
3431
            if (!u) { /* VTST */
3432
                switch (size) {
3433
                case 0: gen_op_neon_tst_u8(); break;
3434
                case 1: gen_op_neon_tst_u16(); break;
3435
                case 2: gen_op_neon_tst_u32(); break;
3436
                default: return 1;
3437
                }
3438
            } else { /* VCEQ */
3439
                switch (size) {
3440
                case 0: gen_op_neon_ceq_u8(); break;
3441
                case 1: gen_op_neon_ceq_u16(); break;
3442
                case 2: gen_op_neon_ceq_u32(); break;
3443
                default: return 1;
3444
                }
3445
            }
3446
            break;
3447
        case 18: /* Multiply.  */
3448
            switch (size) {
3449
            case 0: gen_op_neon_mul_u8(); break;
3450
            case 1: gen_op_neon_mul_u16(); break;
3451
            case 2: gen_op_mul_T0_T1(); break;
3452
            default: return 1;
3453
            }
3454
            NEON_GET_REG(T1, rd, pass);
3455
            if (u) { /* VMLS */
3456
                switch (size) {
3457
                case 0: gen_op_neon_rsb_u8(); break;
3458
                case 1: gen_op_neon_rsb_u16(); break;
3459
                case 2: gen_op_rsbl_T0_T1(); break;
3460
                default: return 1;
3461
                }
3462
            } else { /* VMLA */
3463
                gen_neon_add(size);
3464
            }
3465
            break;
3466
        case 19: /* VMUL */
3467
            if (u) { /* polynomial */
3468
                gen_op_neon_mul_p8();
3469
            } else { /* Integer */
3470
                switch (size) {
3471
                case 0: gen_op_neon_mul_u8(); break;
3472
                case 1: gen_op_neon_mul_u16(); break;
3473
                case 2: gen_op_mul_T0_T1(); break;
3474
                default: return 1;
3475
                }
3476
            }
3477
            break;
3478
        case 20: /* VPMAX */
3479
            GEN_NEON_INTEGER_OP(pmax);
3480
            break;
3481
        case 21: /* VPMIN */
3482
            GEN_NEON_INTEGER_OP(pmin);
3483
            break;
3484
        case 22: /* Multiply high.  */
3485
            if (!u) { /* VQDMULH */
3486
                switch (size) {
3487
                case 1: gen_op_neon_qdmulh_s16(); break;
3488
                case 2: gen_op_neon_qdmulh_s32(); break;
3489
                default: return 1;
3490
                }
3491
            } else { /* VQRDMULH */
3492
                switch (size) {
3493
                case 1: gen_op_neon_qrdmulh_s16(); break;
3494
                case 2: gen_op_neon_qrdmulh_s32(); break;
3495
                default: return 1;
3496
                }
3497
            }
3498
            break;
3499
        case 23: /* VPADD */
3500
            if (u)
3501
                return 1;
3502
            switch (size) {
3503
            case 0: gen_op_neon_padd_u8(); break;
3504
            case 1: gen_op_neon_padd_u16(); break;
3505
            case 2: gen_op_addl_T0_T1(); break;
3506
            default: return 1;
3507
            }
3508
            break;
3509
        case 26: /* Floating point arithmetic.  */
3510
            switch ((u << 2) | size) {
3511
            case 0: /* VADD */
3512
                gen_op_neon_add_f32();
3513
                break;
3514
            case 2: /* VSUB */
3515
                gen_op_neon_sub_f32();
3516
                break;
3517
            case 4: /* VPADD */
3518
                gen_op_neon_add_f32();
3519
                break;
3520
            case 6: /* VABD */
3521
                gen_op_neon_abd_f32();
3522
                break;
3523
            default:
3524
                return 1;
3525
            }
3526
            break;
3527
        case 27: /* Float multiply.  */
3528
            gen_op_neon_mul_f32();
3529
            if (!u) {
3530
                NEON_GET_REG(T1, rd, pass);
3531
                if (size == 0) {
3532
                    gen_op_neon_add_f32();
3533
                } else {
3534
                    gen_op_neon_rsb_f32();
3535
                }
3536
            }
3537
            break;
3538
        case 28: /* Float compare.  */
3539
            if (!u) {
3540
                gen_op_neon_ceq_f32();
3541
            } else {
3542
                if (size == 0)
3543
                    gen_op_neon_cge_f32();
3544
                else
3545
                    gen_op_neon_cgt_f32();
3546
            }
3547
            break;
3548
        case 29: /* Float compare absolute.  */
3549
            if (!u)
3550
                return 1;
3551
            if (size == 0)
3552
                gen_op_neon_acge_f32();
3553
            else
3554
                gen_op_neon_acgt_f32();
3555
            break;
3556
        case 30: /* Float min/max.  */
3557
            if (size == 0)
3558
                gen_op_neon_max_f32();
3559
            else
3560
                gen_op_neon_min_f32();
3561
            break;
3562
        case 31:
3563
            if (size == 0)
3564
                gen_op_neon_recps_f32();
3565
            else
3566
                gen_op_neon_rsqrts_f32();
3567
            break;
3568
        default:
3569
            abort();
3570
        }
3571
        /* Save the result.  For elementwise operations we can put it
3572
           straight into the destination register.  For pairwise operations
3573
           we have to be careful to avoid clobbering the source operands.  */
3574
        if (pairwise && rd == rm) {
3575
            gen_neon_movl_scratch_T0(pass);
3576
        } else {
3577
            NEON_SET_REG(T0, rd, pass);
3578
        }
3579

    
3580
        } /* for pass */
3581
        if (pairwise && rd == rm) {
3582
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
3583
                gen_neon_movl_T0_scratch(pass);
3584
                NEON_SET_REG(T0, rd, pass);
3585
            }
3586
        }
3587
    } else if (insn & (1 << 4)) {
3588
        if ((insn & 0x00380080) != 0) {
3589
            /* Two registers and shift.  */
3590
            op = (insn >> 8) & 0xf;
3591
            if (insn & (1 << 7)) {
3592
                /* 64-bit shift.   */
3593
                size = 3;
3594
            } else {
3595
                size = 2;
3596
                while ((insn & (1 << (size + 19))) == 0)
3597
                    size--;
3598
            }
3599
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
3600
            /* To avoid excessive duplication of ops we implement shift
3601
               by immediate using the variable shift operations.  */
3602
            if (op < 8) {
3603
                /* Shift by immediate:
3604
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
3605
                /* Right shifts are encoded as N - shift, where N is the
3606
                   element size in bits.  */
3607
                if (op <= 4)
3608
                    shift = shift - (1 << (size + 3));
3609
                else
3610
                    shift++;
3611
                if (size == 3) {
3612
                    count = q + 1;
3613
                } else {
3614
                    count = q ? 4: 2;
3615
                }
3616
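                /* Replicate the shift amount into every element of a 32-bit
                   word so the variable-shift helpers apply it per element.  */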
                switch (size) {
3617
                case 0:
3618
                    imm = (uint8_t) shift;
3619
                    imm |= imm << 8;
3620
                    imm |= imm << 16;
3621
                    break;
3622
                case 1:
3623
                    imm = (uint16_t) shift;
3624
                    imm |= imm << 16;
3625
                    break;
3626
                case 2:
3627
                case 3:
3628
                    imm = shift;
3629
                    break;
3630
                default:
3631
                    abort();
3632
                }
3633

    
3634
                for (pass = 0; pass < count; pass++) {
3635
                    if (size < 3) {
3636
                        /* Operands in T0 and T1.  */
3637
                        gen_op_movl_T1_im(imm);
3638
                        NEON_GET_REG(T0, rm, pass);
3639
                    } else {
3640
                        /* Operands in {T0, T1} and env->vfp.scratch.  */
3641
                        gen_op_movl_T0_im(imm);
3642
                        gen_neon_movl_scratch_T0(0);
3643
                        gen_op_movl_T0_im((int32_t)imm >> 31);
3644
                        gen_neon_movl_scratch_T0(1);
3645
                        NEON_GET_REG(T0, rm, pass * 2);
3646
                        NEON_GET_REG(T1, rm, pass * 2 + 1);
3647
                    }
3648

    
3649
                    if (gen_neon_shift_im[op][u][size] == NULL)
3650
                        return 1;
3651
                    gen_neon_shift_im[op][u][size]();
3652

    
3653
                    if (op == 1 || op == 3) {
3654
                        /* Accumulate.  */
3655
                        if (size == 3) {
3656
                            gen_neon_movl_scratch_T0(0);
3657
                            gen_neon_movl_scratch_T1(1);
3658
                            NEON_GET_REG(T0, rd, pass * 2);
3659
                            NEON_GET_REG(T1, rd, pass * 2 + 1);
3660
                            gen_op_neon_addl_u64();
3661
                        } else {
3662
                            NEON_GET_REG(T1, rd, pass);
3663
                            gen_neon_add(size);
3664
                        }
3665
                    } else if (op == 4 || (op == 5 && u)) {
3666
                        /* Insert */
3667
                        if (size == 3) {
3668
                            cpu_abort(env, "VS[LR]I.64 not implemented");
3669
                        }
3670
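                        /* Build a per-element mask of the bits the shifted
                           value may write; the VBSL below merges it into the
                           destination under that mask.  */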
                        switch (size) {
3671
                        case 0:
3672
                            if (op == 4)
3673
                                imm = 0xff >> -shift;
3674
                            else
3675
                                imm = (uint8_t)(0xff << shift);
3676
                            imm |= imm << 8;
3677
                            imm |= imm << 16;
3678
                            break;
3679
                        case 1:
3680
                            if (op == 4)
3681
                                imm = 0xffff >> -shift;
3682
                            else
3683
                                imm = (uint16_t)(0xffff << shift);
3684
                            imm |= imm << 16;
3685
                            break;
3686
                        case 2:
3687
                            if (op == 4)
3688
                                imm = 0xffffffffu >> -shift;
3689
                            else
3690
                                imm = 0xffffffffu << shift;
3691
                            break;
3692
                        default:
3693
                            abort();
3694
                        }
3695
                        NEON_GET_REG(T1, rd, pass);
3696
                        gen_op_movl_T2_im(imm);
3697
                        gen_op_neon_bsl();
3698
                    }
3699
                    if (size == 3) {
3700
                        NEON_SET_REG(T0, rd, pass * 2);
3701
                        NEON_SET_REG(T1, rd, pass * 2 + 1);
3702
                    } else {
3703
                        NEON_SET_REG(T0, rd, pass);
3704
                    }
3705
                } /* for pass */
3706
            } else if (op < 10) {
3707
                /* Shift by immediate and narrow:
3708
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
3709
                shift = shift - (1 << (size + 3));
3710
                size++;
3711
                if (size == 3) {
3712
                    count = q + 1;
3713
                } else {
3714
                    count = q ? 4: 2;
3715
                }
3716
                switch (size) {
3717
                case 1:
3718
                    imm = (uint16_t) shift;
3719
                    imm |= imm << 16;
3720
                    break;
3721
                case 2:
3722
                case 3:
3723
                    imm = shift;
3724
                    break;
3725
                default:
3726
                    abort();
3727
                }
3728

    
3729
                /* Processing MSB first means we need to do less shuffling at
3730
                   the end.  */
3731
                for (pass =  count - 1; pass >= 0; pass--) {
3732
                    /* Avoid clobbering the second operand before it has been
3733
                       written.  */
3734
                    n = pass;
3735
                    if (rd == rm)
3736
                        n ^= (count - 1);
3737
                    else
3738
                        n = pass;
3739

    
3740
                    if (size < 3) {
3741
                        /* Operands in T0 and T1.  */
3742
                        gen_op_movl_T1_im(imm);
3743
                        NEON_GET_REG(T0, rm, n);
3744
                    } else {
3745
                        /* Operands in {T0, T1} and env->vfp.scratch.  */
3746
                        gen_op_movl_T0_im(imm);
3747
                        gen_neon_movl_scratch_T0(0);
3748
                        gen_op_movl_T0_im((int32_t)imm >> 31);
3749
                        gen_neon_movl_scratch_T0(1);
3750
                        NEON_GET_REG(T0, rm, n * 2);
3751
                        NEON_GET_REG(T1, rm, n * 2 + 1);
3752
                    }
3753

    
3754
                    gen_neon_shift_im_narrow[q][u][size - 1]();
3755

    
3756
                    if (size < 3 && (pass & 1) == 0) {
3757
                        gen_neon_movl_scratch_T0(0);
3758
                    } else {
3759
                        uint32_t offset;
3760

    
3761
                        if (size < 3)
3762
                            gen_neon_movl_T1_scratch(0);
3763

    
3764
                        if (op == 8 && !u) {
3765
                            gen_neon_narrow[size - 1]();
3766
                        } else {
3767
                            if (op == 8)
3768
                                gen_neon_narrow_sats[size - 2]();
3769
                            else
3770
                                gen_neon_narrow_satu[size - 1]();
3771
                        }
3772
                        if (size == 3)
3773
                            offset = neon_reg_offset(rd, n);
3774
                        else
3775
                            offset = neon_reg_offset(rd, n >> 1);
3776
                        gen_op_neon_setreg_T0(offset);
3777
                    }
3778
                } /* for pass */
3779
            } else if (op == 10) {
3780
                /* VSHLL */
3781
                if (q)
3782
                    return 1;
3783
                for (pass = 0; pass < 2; pass++) {
3784
                    /* Avoid clobbering the input operand.  */
3785
                    if (rd == rm)
3786
                        n = 1 - pass;
3787
                    else
3788
                        n = pass;
3789

    
3790
                    NEON_GET_REG(T0, rm, n);
3791
                    GEN_NEON_INTEGER_OP(widen);
3792
                    if (shift != 0) {
3793
                        /* The shift is less than the width of the source
3794
                           type, so in some cases we can just
3795
                           shift the whole register.  */
3796
                        if (size == 1 || (size == 0 && u)) {
3797
                            gen_op_shll_T0_im(shift);
3798
                            gen_op_shll_T1_im(shift);
3799
                        } else {
3800
                            switch (size) {
3801
                            case 0: gen_op_neon_shll_u16(shift); break;
3802
                            case 2: gen_op_neon_shll_u64(shift); break;
3803
                            default: abort();
3804
                            }
3805
                        }
3806
                    }
3807
                    NEON_SET_REG(T0, rd, n * 2);
3808
                    NEON_SET_REG(T1, rd, n * 2 + 1);
3809
                }
3810
            } else if (op == 15 || op == 16) {
3811
                /* VCVT fixed-point.  */
3812
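                /* Fixed-point <-> float conversion is done per 32-bit lane
                   using the scalar VFP helpers; shift is the number of
                   fraction bits.  */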
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
3813
                    gen_op_vfp_getreg_F0s(neon_reg_offset(rm, pass));
3814
                    if (op & 1) {
3815
                        if (u)
3816
                            gen_op_vfp_ultos(shift);
3817
                        else
3818
                            gen_op_vfp_sltos(shift);
3819
                    } else {
3820
                        if (u)
3821
                            gen_op_vfp_touls(shift);
3822
                        else
3823
                            gen_op_vfp_tosls(shift);
3824
                    }
3825
                    gen_op_vfp_setreg_F0s(neon_reg_offset(rd, pass));
3826
                }
3827
            } else {
3828
                return 1;
3829
            }
3830
        } else { /* (insn & 0x00380080) == 0 */
3831
            int invert;
3832

    
3833
            op = (insn >> 8) & 0xf;
3834
            /* One register and immediate.  */
3835
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
3836
            invert = (insn & (1 << 5)) != 0;
3837
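            /* Expand the 8-bit immediate into a 32-bit pattern according to
               the cmode-style op field.  */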
            switch (op) {
3838
            case 0: case 1:
3839
                /* no-op */
3840
                break;
3841
            case 2: case 3:
3842
                imm <<= 8;
3843
                break;
3844
            case 4: case 5:
3845
                imm <<= 16;
3846
                break;
3847
            case 6: case 7:
3848
                imm <<= 24;
3849
                break;
3850
            case 8: case 9:
3851
                imm |= imm << 16;
3852
                break;
3853
            case 10: case 11:
3854
                imm = (imm << 8) | (imm << 24);
3855
                break;
3856
            case 12:
3857
                imm = (imm << 8) | 0xff;
3858
                break;
3859
            case 13:
3860
                imm = (imm << 16) | 0xffff;
3861
                break;
3862
            case 14:
3863
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
3864
                if (invert)
3865
                    imm = ~imm;
3866
                break;
3867
            case 15:
3868
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
3869
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
3870
                break;
3871
            }
3872
            if (invert)
3873
                imm = ~imm;
3874

    
3875
            if (op != 14 || !invert)
3876
                gen_op_movl_T1_im(imm);
3877

    
3878
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
3879
                if (op & 1 && op < 12) {
3880
                    NEON_GET_REG(T0, rd, pass);
3881
                    if (invert) {
3882
                        /* The immediate value has already been inverted, so
3883
                           BIC becomes AND.  */
3884
                        gen_op_andl_T0_T1();
3885
                    } else {
3886
                        gen_op_orl_T0_T1();
3887
                    }
3888
                    NEON_SET_REG(T0, rd, pass);
3889
                } else {
3890
                    if (op == 14 && invert) {
3891
                        uint32_t tmp;
3892
                        tmp = 0;
3893
                        for (n = 0; n < 4; n++) {
3894
                            if (imm & (1 << (n + (pass & 1) * 4)))
3895
                                tmp |= 0xff << (n * 8);
3896
                        }
3897
                        gen_op_movl_T1_im(tmp);
3898
                    }
3899
                    /* VMOV, VMVN.  */
3900
                    NEON_SET_REG(T1, rd, pass);
3901
                }
3902
            }
3903
        }
3904
    } else { /* (insn & 0x00800010 == 0x00800010) */
3905
        if (size != 3) {
3906
            op = (insn >> 8) & 0xf;
3907
            if ((insn & (1 << 6)) == 0) {
3908
                /* Three registers of different lengths.  */
3909
                int src1_wide;
3910
                int src2_wide;
3911
                int prewiden;
3912
                /* prewiden, src1_wide, src2_wide */
3913
                static const int neon_3reg_wide[16][3] = {
3914
                    {1, 0, 0}, /* VADDL */
3915
                    {1, 1, 0}, /* VADDW */
3916
                    {1, 0, 0}, /* VSUBL */
3917
                    {1, 1, 0}, /* VSUBW */
3918
                    {0, 1, 1}, /* VADDHN */
3919
                    {0, 0, 0}, /* VABAL */
3920
                    {0, 1, 1}, /* VSUBHN */
3921
                    {0, 0, 0}, /* VABDL */
3922
                    {0, 0, 0}, /* VMLAL */
3923
                    {0, 0, 0}, /* VQDMLAL */
3924
                    {0, 0, 0}, /* VMLSL */
3925
                    {0, 0, 0}, /* VQDMLSL */
3926
                    {0, 0, 0}, /* Integer VMULL */
3927
                    {0, 0, 0}, /* VQDMULL */
3928
                    {0, 0, 0}  /* Polynomial VMULL */
3929
                };
3930

    
3931
                prewiden = neon_3reg_wide[op][0];
3932
                src1_wide = neon_3reg_wide[op][1];
3933
                src2_wide = neon_3reg_wide[op][2];
3934

    
3935
                /* Avoid overlapping operands.  Wide source operands are
3936
                   always aligned so will never overlap with wide
3937
                   destinations in problematic ways.  */
3938
                if (rd == rm) {
3939
                    NEON_GET_REG(T2, rm, 1);
3940
                } else if (rd == rn) {
3941
                    NEON_GET_REG(T2, rn, 1);
3942
                }
3943
                for (pass = 0; pass < 2; pass++) {
3944
                    /* Load the second operand into env->vfp.scratch.
3945
                       Also widen narrow operands.  */
3946
                    if (pass == 1 && rd == rm) {
3947
                        if (prewiden) {
3948
                            gen_op_movl_T0_T2();
3949
                        } else {
3950
                            gen_op_movl_T1_T2();
3951
                        }
3952
                    } else {
3953
                        if (src2_wide) {
3954
                            NEON_GET_REG(T0, rm, pass * 2);
3955
                            NEON_GET_REG(T1, rm, pass * 2 + 1);
3956
                        } else {
3957
                            if (prewiden) {
3958
                                NEON_GET_REG(T0, rm, pass);
3959
                            } else {
3960
                                NEON_GET_REG(T1, rm, pass);
3961
                            }
3962
                        }
3963
                    }
3964
                    if (prewiden && !src2_wide) {
3965
                        GEN_NEON_INTEGER_OP(widen);
3966
                    }
3967
                    if (prewiden || src2_wide) {
3968
                        gen_neon_movl_scratch_T0(0);
3969
                        gen_neon_movl_scratch_T1(1);
3970
                    }
3971

    
3972
                    /* Load the first operand.  */
3973
                    if (pass == 1 && rd == rn) {
3974
                        gen_op_movl_T0_T2();
3975
                    } else {
3976
                        if (src1_wide) {
3977
                            NEON_GET_REG(T0, rn, pass * 2);
3978
                            NEON_GET_REG(T1, rn, pass * 2 + 1);
3979
                        } else {
3980
                            NEON_GET_REG(T0, rn, pass);
3981
                        }
3982
                    }
3983
                    if (prewiden && !src1_wide) {
3984
                        GEN_NEON_INTEGER_OP(widen);
3985
                    }
3986
                    switch (op) {
3987
                    case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
3988
                        switch (size) {
3989
                        case 0: gen_op_neon_addl_u16(); break;
3990
                        case 1: gen_op_neon_addl_u32(); break;
3991
                        case 2: gen_op_neon_addl_u64(); break;
3992
                        default: abort();
3993
                        }
3994
                        break;
3995
                    case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
3996
                        switch (size) {
3997
                        case 0: gen_op_neon_subl_u16(); break;
3998
                        case 1: gen_op_neon_subl_u32(); break;
3999
                        case 2: gen_op_neon_subl_u64(); break;
4000
                        default: abort();
4001
                        }
4002
                        break;
4003
                    case 5: case 7: /* VABAL, VABDL */
4004
                        switch ((size << 1) | u) {
4005
                        case 0: gen_op_neon_abdl_s16(); break;
4006
                        case 1: gen_op_neon_abdl_u16(); break;
4007
                        case 2: gen_op_neon_abdl_s32(); break;
4008
                        case 3: gen_op_neon_abdl_u32(); break;
4009
                        case 4: gen_op_neon_abdl_s64(); break;
4010
                        case 5: gen_op_neon_abdl_u64(); break;
4011
                        default: abort();
4012
                        }
4013
                        break;
4014
                    case 8: case 9: case 10: case 11: case 12: case 13:
4015
                        /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
4016
                        switch ((size << 1) | u) {
4017
                        case 0: gen_op_neon_mull_s8(); break;
4018
                        case 1: gen_op_neon_mull_u8(); break;
4019
                        case 2: gen_op_neon_mull_s16(); break;
4020
                        case 3: gen_op_neon_mull_u16(); break;
4021
                        case 4: gen_op_imull_T0_T1(); break;
4022
                        case 5: gen_op_mull_T0_T1(); break;
4023
                        default: abort();
4024
                        }
4025
                        break;
4026
                    case 14: /* Polynomial VMULL */
4027
                        cpu_abort(env, "Polynomial VMULL not implemented");
4028

    
4029
                    default: /* 15 is RESERVED.  */
4030
                        return 1;
4031
                    }
4032
                    if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4033
                        /* Accumulate.  */
4034
                        if (op == 10 || op == 11) {
4035
                            switch (size) {
4036
                            case 0: gen_op_neon_negl_u16(); break;
4037
                            case 1: gen_op_neon_negl_u32(); break;
4038
                            case 2: gen_op_neon_negl_u64(); break;
4039
                            default: abort();
4040
                            }
4041
                        }
4042

    
4043
                        gen_neon_movl_scratch_T0(0);
4044
                        gen_neon_movl_scratch_T1(1);
4045

    
4046
                        if (op != 13) {
4047
                            NEON_GET_REG(T0, rd, pass * 2);
4048
                            NEON_GET_REG(T1, rd, pass * 2 + 1);
4049
                        }
4050

    
4051
                        switch (op) {
4052
                        case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
4053
                            switch (size) {
4054
                            case 0: gen_op_neon_addl_u16(); break;
4055
                            case 1: gen_op_neon_addl_u32(); break;
4056
                            case 2: gen_op_neon_addl_u64(); break;
4057
                            default: abort();
4058
                            }
4059
                            break;
4060
                        case 9: case 11: /* VQDMLAL, VQDMLSL */
4061
                            switch (size) {
4062
                            case 1: gen_op_neon_addl_saturate_s32(); break;
4063
                            case 2: gen_op_neon_addl_saturate_s64(); break;
4064
                            default: abort();
4065
                            }
4066
                            /* Fall through.  */
4067
                        case 13: /* VQDMULL */
4068
                            switch (size) {
4069
                            case 1: gen_op_neon_addl_saturate_s32(); break;
4070
                            case 2: gen_op_neon_addl_saturate_s64(); break;
4071
                            default: abort();
4072
                            }
4073
                            break;
4074
                        default:
4075
                            abort();
4076
                        }
4077
                        NEON_SET_REG(T0, rd, pass * 2);
4078
                        NEON_SET_REG(T1, rd, pass * 2 + 1);
4079
                    } else if (op == 4 || op == 6) {
4080
                        /* Narrowing operation.  */
4081
                        if (u) {
4082
                            switch (size) {
4083
                            case 0: gen_op_neon_narrow_high_u8(); break;
4084
                            case 1: gen_op_neon_narrow_high_u16(); break;
4085
                            case 2: gen_op_movl_T0_T1(); break;
4086
                            default: abort();
4087
                            }
4088
                        } else {
4089
                            switch (size) {
4090
                            case 0: gen_op_neon_narrow_high_round_u8(); break;
4091
                            case 1: gen_op_neon_narrow_high_round_u16(); break;
4092
                            case 2: gen_op_neon_narrow_high_round_u32(); break;
4093
                            default: abort();
4094
                            }
4095
                        }
4096
                        NEON_SET_REG(T0, rd, pass);
4097
                    } else {
4098
                        /* Write back the result.  */
4099
                        NEON_SET_REG(T0, rd, pass * 2);
4100
                        NEON_SET_REG(T1, rd, pass * 2 + 1);
4101
                    }
4102
                }
4103
            } else {
4104
                /* Two registers and a scalar.  */
4105
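                /* The scalar operand is duplicated across lanes once and kept
                   in T2 so it survives every pass of the loop.  */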
                switch (op) {
4106
                case 0: /* Integer VMLA scalar */
4107
                case 1: /* Float VMLA scalar */
4108
                case 4: /* Integer VMLS scalar */
4109
                case 5: /* Floating point VMLS scalar */
4110
                case 8: /* Integer VMUL scalar */
4111
                case 9: /* Floating point VMUL scalar */
4112
                case 12: /* VQDMULH scalar */
4113
                case 13: /* VQRDMULH scalar */
4114
                    gen_neon_get_scalar(size, rm);
4115
                    gen_op_movl_T2_T0();
4116
                    for (pass = 0; pass < (u ? 4 : 2); pass++) {
4117
                        if (pass != 0)
4118
                            gen_op_movl_T0_T2();
4119
                        NEON_GET_REG(T1, rn, pass);
4120
                        if (op == 12) {
4121
                            if (size == 1) {
4122
                                gen_op_neon_qdmulh_s16();
4123
                            } else {
4124
                                gen_op_neon_qdmulh_s32();
4125
                            }
4126
                        } else if (op == 13) {
4127
                            if (size == 1) {
4128
                                gen_op_neon_qrdmulh_s16();
4129
                            } else {
4130
                                gen_op_neon_qrdmulh_s32();
4131
                            }
4132
                        } else if (op & 1) {
4133
                            gen_op_neon_mul_f32();
4134
                        } else {
4135
                            switch (size) {
4136
                            case 0: gen_op_neon_mul_u8(); break;
4137
                            case 1: gen_op_neon_mul_u16(); break;
4138
                            case 2: gen_op_mul_T0_T1(); break;
4139
                            default: return 1;
4140
                            }
4141
                        }
4142
                        if (op < 8) {
4143
                            /* Accumulate.  */
4144
                            NEON_GET_REG(T1, rd, pass);
4145
                            switch (op) {
4146
                            case 0:
4147
                                gen_neon_add(size);
4148
                                break;
4149
                            case 1:
4150
                                gen_op_neon_add_f32();
4151
                                break;
4152
                            case 4:
4153
                                switch (size) {
4154
                                case 0: gen_op_neon_rsb_u8(); break;
4155
                                case 1: gen_op_neon_rsb_u16(); break;
4156
                                case 2: gen_op_rsbl_T0_T1(); break;
4157
                                default: return 1;
4158
                                }
4159
                                break;
4160
                            case 5:
4161
                                gen_op_neon_rsb_f32();
4162
                                break;
4163
                            default:
4164
                                abort();
4165
                            }
4166
                        }
4167
                        NEON_SET_REG(T0, rd, pass);
4168
                    }
4169
                    break;
4170
                case 2: /* VMLAL scalar */
4171
                case 3: /* VQDMLAL scalar */
4172
                case 6: /* VMLSL scalar */
4173
                case 7: /* VQDMLSL scalar */
4174
                case 10: /* VMULL scalar */
4175
                case 11: /* VQDMULL scalar */
4176
                    if (rd == rn) {
4177
                        /* Save overlapping operands before they are
4178
                           clobbered.  */
4179
                        NEON_GET_REG(T0, rn, 1);
4180
                        gen_neon_movl_scratch_T0(2);
4181
                    }
4182
                    gen_neon_get_scalar(size, rm);
4183
                    gen_op_movl_T2_T0();
4184
                    for (pass = 0; pass < 2; pass++) {
4185
                        if (pass != 0) {
4186
                            gen_op_movl_T0_T2();
4187
                        }
4188
                        if (pass != 0 && rd == rn) {
4189
                            gen_neon_movl_T1_scratch(2);
4190
                        } else {
4191
                            NEON_GET_REG(T1, rn, pass);
4192
                        }
4193
                        switch ((size << 1) | u) {
4194
                        case 0: gen_op_neon_mull_s8(); break;
4195
                        case 1: gen_op_neon_mull_u8(); break;
4196
                        case 2: gen_op_neon_mull_s16(); break;
4197
                        case 3: gen_op_neon_mull_u16(); break;
4198
                        case 4: gen_op_imull_T0_T1(); break;
4199
                        case 5: gen_op_mull_T0_T1(); break;
4200
                        default: abort();
4201
                        }
4202
                        if (op == 6 || op == 7) {
4203
                            switch (size) {
4204
                            case 0: gen_op_neon_negl_u16(); break;
4205
                            case 1: gen_op_neon_negl_u32(); break;
4206
                            case 2: gen_op_neon_negl_u64(); break;
4207
                            default: abort();
4208
                            }
4209
                        }
4210
                        gen_neon_movl_scratch_T0(0);
4211
                        gen_neon_movl_scratch_T1(1);
4212
                        NEON_GET_REG(T0, rd, pass * 2);
4213
                        NEON_GET_REG(T1, rd, pass * 2 + 1);
4214
                        switch (op) {
4215
                        case 2: case 6:
4216
                            switch (size) {
4217
                            case 0: gen_op_neon_addl_u16(); break;
4218
                            case 1: gen_op_neon_addl_u32(); break;
4219
                            case 2: gen_op_neon_addl_u64(); break;
4220
                            default: abort();
4221
                            }
4222
                            break;
4223
                        case 3: case 7:
4224
                            switch (size) {
4225
                            case 1:
4226
                                gen_op_neon_addl_saturate_s32();
4227
                                gen_op_neon_addl_saturate_s32();
4228
                                break;
4229
                            case 2:
4230
                                gen_op_neon_addl_saturate_s64();
4231
                                gen_op_neon_addl_saturate_s64();
4232
                                break;
4233
                            default: abort();
4234
                            }
4235
                            break;
4236
                        case 10:
4237
                            /* no-op */
4238
                            break;
4239
                        case 11:
4240
                            switch (size) {
4241
                            case 1: gen_op_neon_addl_saturate_s32(); break;
4242
                            case 2: gen_op_neon_addl_saturate_s64(); break;
4243
                            default: abort();
4244
                            }
4245
                            break;
4246
                        default:
4247
                            abort();
4248
                        }
4249
                        NEON_SET_REG(T0, rd, pass * 2);
4250
                        NEON_SET_REG(T1, rd, pass * 2 + 1);
4251
                    }
4252
                    break;
4253
                default: /* 14 and 15 are RESERVED */
4254
                    return 1;
4255
                }
4256
            }
4257
        } else { /* size == 3 */
4258
            if (!u) {
4259
                /* Extract.  */
4260
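                /* VEXT: rn:rm are treated as one combined vector and a window
                   starting at byte offset imm is copied into rd.  */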
                int reg;
4261
                imm = (insn >> 8) & 0xf;
4262
                reg = rn;
4263
                count = q ? 4 : 2;
4264
                n = imm >> 2;
4265
                NEON_GET_REG(T0, reg, n);
4266
                for (pass = 0; pass < count; pass++) {
4267
                    n++;
4268
                    if (n > count) {
4269
                        reg = rm;
4270
                        n -= count;
4271
                    }
4272
                    if (imm & 3) {
4273
                        NEON_GET_REG(T1, reg, n);
4274
                        gen_op_neon_extract((insn << 3) & 0x1f);
4275
                    }
4276
                    /* ??? This is broken if rd and rm overlap */
4277
                    NEON_SET_REG(T0, rd, pass);
4278
                    if (imm & 3) {
4279
                        gen_op_movl_T0_T1();
4280
                    } else {
4281
                        NEON_GET_REG(T0, reg, n);
4282
                    }
4283
                }
4284
            } else if ((insn & (1 << 11)) == 0) {
4285
                /* Two register misc.  */
4286
                op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
4287
                size = (insn >> 18) & 3;
4288
                switch (op) {
4289
                case 0: /* VREV64 */
4290
                    if (size == 3)
4291
                        return 1;
4292
                    for (pass = 0; pass < (q ? 2 : 1); pass++) {
4293
                        NEON_GET_REG(T0, rm, pass * 2);
4294
                        NEON_GET_REG(T1, rm, pass * 2 + 1);
4295
                        switch (size) {
4296
                        case 0: gen_op_rev_T0(); break;
4297
                        case 1: gen_op_revh_T0(); break;
4298
                        case 2: /* no-op */ break;
4299
                        default: abort();
4300
                        }
4301
                        NEON_SET_REG(T0, rd, pass * 2 + 1);
4302
                        if (size == 2) {
4303
                            NEON_SET_REG(T1, rd, pass * 2);
4304
                        } else {
4305
                            gen_op_movl_T0_T1();
4306
                            switch (size) {
4307
                            case 0: gen_op_rev_T0(); break;
4308
                            case 1: gen_op_revh_T0(); break;
4309
                            default: abort();
4310
                            }
4311
                            NEON_SET_REG(T0, rd, pass * 2);
4312
                        }
4313
                    }
4314
                    break;
4315
                case 4: case 5: /* VPADDL */
4316
                case 12: case 13: /* VPADAL */
4317
                    if (size < 2)
4318
                        goto elementwise;
4319
                    if (size == 3)
4320
                        return 1;
4321
                    for (pass = 0; pass < (q ? 2 : 1); pass++) {
4322
                        NEON_GET_REG(T0, rm, pass * 2);
4323
                        NEON_GET_REG(T1, rm, pass * 2 + 1);
4324
                        if (op & 1)
4325
                            gen_op_neon_paddl_u32();
4326
                        else
4327
                            gen_op_neon_paddl_s32();
4328
                        if (op >= 12) {
4329
                            /* Accumulate.  */
4330
                            gen_neon_movl_scratch_T0(0);
4331
                            gen_neon_movl_scratch_T1(1);
4332

    
4333
                            NEON_GET_REG(T0, rd, pass * 2);
4334
                            NEON_GET_REG(T1, rd, pass * 2 + 1);
4335
                            gen_op_neon_addl_u64();
4336
                        }
4337
                        NEON_SET_REG(T0, rd, pass * 2);
4338
                        NEON_SET_REG(T1, rd, pass * 2 + 1);
4339
                    }
4340
                    break;
4341
                case 33: /* VTRN */
4342
                    if (size == 2) {
4343
                        for (n = 0; n < (q ? 4 : 2); n += 2) {
4344
                            NEON_GET_REG(T0, rm, n);
4345
                            NEON_GET_REG(T1, rd, n + 1);
4346
                            NEON_SET_REG(T1, rm, n);
4347
                            NEON_SET_REG(T0, rd, n + 1);
4348
                        }
4349
                    } else {
4350
                        goto elementwise;
4351
                    }
4352
                    break;
4353
                case 34: /* VUZP */
4354
                    /* Reg  Before       After
4355
                       Rd   A3 A2 A1 A0  B2 B0 A2 A0
4356
                       Rm   B3 B2 B1 B0  B3 B1 A3 A1
4357
                     */
4358
                    if (size == 3)
4359
                        return 1;
4360
                    gen_neon_unzip(rd, q, 0, size);
4361
                    gen_neon_unzip(rm, q, 4, size);
4362
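                    /* The unzipped halves of rd and rm are staged in the
                       scratch area (slots 0..3 and 4..7); the order tables
                       below map them back onto the destination lanes.  */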
                    if (q) {
4363
                        static int unzip_order_q[8] =
4364
                            {0, 2, 4, 6, 1, 3, 5, 7};
4365
                        for (n = 0; n < 8; n++) {
4366
                            int reg = (n < 4) ? rd : rm;
4367
                            gen_neon_movl_T0_scratch(unzip_order_q[n]);
4368
                            NEON_SET_REG(T0, reg, n % 4);
4369
                        }
4370
                    } else {
4371
                        static int unzip_order[4] =
4372
                            {0, 4, 1, 5};
4373
                        for (n = 0; n < 4; n++) {
4374
                            int reg = (n < 2) ? rd : rm;
4375
                            gen_neon_movl_T0_scratch(unzip_order[n]);
4376
                            NEON_SET_REG(T0, reg, n % 2);
4377
                        }
4378
                    }
4379
                    break;
4380
                case 35: /* VZIP */
4381
                    /* Reg  Before       After
4382
                       Rd   A3 A2 A1 A0  B1 A1 B0 A0
4383
                       Rm   B3 B2 B1 B0  B3 A3 B2 A2
4384
                     */
4385
                    if (size == 3)
4386
                        return 1;
4387
                    count = (q ? 4 : 2);
4388
                    for (n = 0; n < count; n++) {
4389
                        NEON_GET_REG(T0, rd, n);
4390
                        NEON_GET_REG(T1, rm, n);
4391
                        switch (size) {
4392
                        case 0: gen_op_neon_zip_u8(); break;
4393
                        case 1: gen_op_neon_zip_u16(); break;
4394
                        case 2: /* no-op */; break;
4395
                        default: abort();
4396
                        }
4397
                        gen_neon_movl_scratch_T0(n * 2);
4398
                        gen_neon_movl_scratch_T1(n * 2 + 1);
4399
                    }
4400
                    for (n = 0; n < count * 2; n++) {
4401
                        int reg = (n < count) ? rd : rm;
4402
                        gen_neon_movl_T0_scratch(n);
4403
                        NEON_SET_REG(T0, reg, n % count);
4404
                    }
4405
                    break;
4406
                case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
4407
                    for (pass = 0; pass < 2; pass++) {
4408
                        if (rd == rm + 1) {
4409
                            n = 1 - pass;
4410
                        } else {
4411
                            n = pass;
4412
                        }
4413
                        NEON_GET_REG(T0, rm, n * 2);
4414
                        NEON_GET_REG(T1, rm, n * 2 + 1);
4415
                        if (op == 36 && q == 0) {
4416
                            switch (size) {
4417
                            case 0: gen_op_neon_narrow_u8(); break;
4418
                            case 1: gen_op_neon_narrow_u16(); break;
4419
                            case 2: /* no-op */ break;
4420
                            default: return 1;
4421
                            }
4422
                        } else if (q) {
4423
                            switch (size) {
4424
                            case 0: gen_op_neon_narrow_sat_u8(); break;
4425
                            case 1: gen_op_neon_narrow_sat_u16(); break;
4426
                            case 2: gen_op_neon_narrow_sat_u32(); break;
4427
                            default: return 1;
4428
                            }
4429
                        } else {
4430
                            switch (size) {
4431
                            case 0: gen_op_neon_narrow_sat_s8(); break;
4432
                            case 1: gen_op_neon_narrow_sat_s16(); break;
4433
                            case 2: gen_op_neon_narrow_sat_s32(); break;
4434
                            default: return 1;
4435
                            }
4436
                        }
4437
                        NEON_SET_REG(T0, rd, n);
4438
                    }
4439
                    break;
4440
                case 38: /* VSHLL */
4441
                    if (q)
4442
                        return 1;
4443
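                    /* If the source and destination overlap, save the high
                       word of Rm in T2 before pass 0 overwrites it.  */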
                    if (rm == rd) {
4444
                        NEON_GET_REG(T2, rm, 1);
4445
                    }
4446
                    for (pass = 0; pass < 2; pass++) {
4447
                        if (pass == 1 && rm == rd) {
4448
                            gen_op_movl_T0_T2();
4449
                        } else {
4450
                            NEON_GET_REG(T0, rm, pass);
4451
                        }
4452
                        switch (size) {
4453
                        case 0: gen_op_neon_widen_high_u8(); break;
4454
                        case 1: gen_op_neon_widen_high_u16(); break;
4455
                        case 2:
4456
                            gen_op_movl_T1_T0();
4457
                            gen_op_movl_T0_im(0);
4458
                            break;
4459
                        default: return 1;
4460
                        }
4461
                        NEON_SET_REG(T0, rd, pass * 2);
4462
                        NEON_SET_REG(T1, rd, pass * 2 + 1);
4463
                    }
4464
                    break;
4465
                default:
4466
                elementwise:
4467
                    for (pass = 0; pass < (q ? 4 : 2); pass++) {
4468
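                        /* Float VABS/VNEG, the float estimates and the VCVT
                           ops work on the VFP register F0s; everything else
                           uses T0.  */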
                        if (op == 30 || op == 31 || op >= 58) {
4469
                            gen_op_vfp_getreg_F0s(neon_reg_offset(rm, pass));
4470
                        } else {
4471
                            NEON_GET_REG(T0, rm, pass);
4472
                        }
4473
                        switch (op) {
4474
                        case 1: /* VREV32 */
4475
                            switch (size) {
4476
                            case 0: gen_op_rev_T0(); break;
4477
                            case 1: gen_op_revh_T0(); break;
4478
                            default: return 1;
4479
                            }
4480
                            break;
4481
                        case 2: /* VREV16 */
4482
                            if (size != 0)
4483
                                return 1;
4484
                            gen_op_rev16_T0();
4485
                            break;
4486
                        case 4: case 5: /* VPADDL */
4487
                        case 12: case 13: /* VPADAL */
4488
                            switch ((size << 1) | (op & 1)) {
4489
                            case 0: gen_op_neon_paddl_s8(); break;
4490
                            case 1: gen_op_neon_paddl_u8(); break;
4491
                            case 2: gen_op_neon_paddl_s16(); break;
4492
                            case 3: gen_op_neon_paddl_u16(); break;
4493
                            default: abort();
4494
                            }
4495
                            if (op >= 12) {
4496
                                /* Accumulate */
4497
                                NEON_GET_REG(T1, rd, pass);
4498
                                switch (size) {
4499
                                case 0: gen_op_neon_add_u16(); break;
4500
                                case 1: gen_op_addl_T0_T1(); break;
4501
                                default: abort();
4502
                                }
4503
                            }
4504
                            break;
4505
                        case 8: /* CLS */
4506
                            switch (size) {
4507
                            case 0: gen_op_neon_cls_s8(); break;
4508
                            case 1: gen_op_neon_cls_s16(); break;
4509
                            case 2: gen_op_neon_cls_s32(); break;
4510
                            default: return 1;
4511
                            }
4512
                            break;
4513
                        case 9: /* CLZ */
4514
                            switch (size) {
4515
                            case 0: gen_op_neon_clz_u8(); break;
4516
                            case 1: gen_op_neon_clz_u16(); break;
4517
                            case 2: gen_op_clz_T0(); break;
4518
                            default: return 1;
4519
                            }
4520
                            break;
4521
                        case 10: /* CNT */
4522
                            if (size != 0)
4523
                                return 1;
4524
                            gen_op_neon_cnt_u8();
4525
                            break;
4526
                        case 11: /* VNOT */
4527
                            if (size != 0)
4528
                                return 1;
4529
                            gen_op_notl_T0();
4530
                            break;
4531
                        case 14: /* VQABS */
4532
                            switch (size) {
4533
                            case 0: gen_op_neon_qabs_s8(); break;
4534
                            case 1: gen_op_neon_qabs_s16(); break;
4535
                            case 2: gen_op_neon_qabs_s32(); break;
4536
                            default: return 1;
4537
                            }
4538
                            break;
4539
                        case 15: /* VQNEG */
4540
                            switch (size) {
4541
                            case 0: gen_op_neon_qneg_s8(); break;
4542
                            case 1: gen_op_neon_qneg_s16(); break;
4543
                            case 2: gen_op_neon_qneg_s32(); break;
4544
                            default: return 1;
4545
                            }
4546
                            break;
4547
                        case 16: case 19: /* VCGT #0, VCLE #0 */
4548
                            gen_op_movl_T1_im(0);
4549
                            switch(size) {
4550
                            case 0: gen_op_neon_cgt_s8(); break;
4551
                            case 1: gen_op_neon_cgt_s16(); break;
4552
                            case 2: gen_op_neon_cgt_s32(); break;
4553
                            default: return 1;
4554
                            }
4555
                            if (op == 19)
4556
                                gen_op_notl_T0();
4557
                            break;
4558
                        case 17: case 20: /* VCGE #0, VCLT #0 */
4559
                            gen_op_movl_T1_im(0);
4560
                            switch(size) {
4561
                            case 0: gen_op_neon_cge_s8(); break;
4562
                            case 1: gen_op_neon_cge_s16(); break;
4563
                            case 2: gen_op_neon_cge_s32(); break;
4564
                            default: return 1;
4565
                            }
4566
                            if (op == 20)
4567
                                gen_op_notl_T0();
4568
                            break;
4569
                        case 18: /* VCEQ #0 */
4570
                            gen_op_movl_T1_im(0);
4571
                            switch(size) {
4572
                            case 0: gen_op_neon_ceq_u8(); break;
4573
                            case 1: gen_op_neon_ceq_u16(); break;
4574
                            case 2: gen_op_neon_ceq_u32(); break;
4575
                            default: return 1;
4576
                            }
4577
                            break;
4578
                        case 22: /* VABS */
4579
                            switch(size) {
4580
                            case 0: gen_op_neon_abs_s8(); break;
4581
                            case 1: gen_op_neon_abs_s16(); break;
4582
                            case 2: gen_op_neon_abs_s32(); break;
4583
                            default: return 1;
4584
                            }
4585
                            break;
4586
                        case 23: /* VNEG */
4587
                            gen_op_movl_T1_im(0);
4588
                            switch(size) {
4589
                            case 0: gen_op_neon_rsb_u8(); break;
4590
                            case 1: gen_op_neon_rsb_u16(); break;
4591
                            case 2: gen_op_rsbl_T0_T1(); break;
4592
                            default: return 1;
4593
                            }
4594
                            break;
4595
                        case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
4596
                            gen_op_movl_T1_im(0);
4597
                            gen_op_neon_cgt_f32();
4598
                            if (op == 27)
4599
                                gen_op_notl_T0();
4600
                            break;
4601
                        case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
4602
                            gen_op_movl_T1_im(0);
4603
                            gen_op_neon_cge_f32();
4604
                            if (op == 28)
4605
                                gen_op_notl_T0();
4606
                            break;
4607
                        case 26: /* Float VCEQ #0 */
4608
                            gen_op_movl_T1_im(0);
4609
                            gen_op_neon_ceq_f32();
4610
                            break;
4611
                        case 30: /* Float VABS */
4612
                            gen_op_vfp_abss();
4613
                            break;
4614
                        case 31: /* Float VNEG */
4615
                            gen_op_vfp_negs();
4616
                            break;
4617
                        case 32: /* VSWP */
4618
                            NEON_GET_REG(T1, rd, pass);
4619
                            NEON_SET_REG(T1, rm, pass);
4620
                            break;
4621
                        case 33: /* VTRN */
4622
                            NEON_GET_REG(T1, rd, pass);
4623
                            switch (size) {
4624
                            case 0: gen_op_neon_trn_u8(); break;
4625
                            case 1: gen_op_neon_trn_u16(); break;
4626
                            case 2: abort();
4627
                            default: return 1;
4628
                            }
4629
                            NEON_SET_REG(T1, rm, pass);
4630
                            break;
4631
                        case 56: /* Integer VRECPE */
4632
                            gen_op_neon_recpe_u32();
4633
                            break;
4634
                        case 57: /* Integer VRSQRTE */
4635
                            gen_op_neon_rsqrte_u32();
4636
                            break;
4637
                        case 58: /* Float VRECPE */
4638
                            gen_op_neon_recpe_f32();
4639
                            break;
4640
                        case 59: /* Float VRSQRTE */
4641
                            gen_op_neon_rsqrte_f32();
4642
                            break;
4643
                        case 60: /* VCVT.F32.S32 */
4644
                            gen_op_vfp_sitos();
4645
                            break;
4646
                        case 61: /* VCVT.F32.U32 */
4647
                            gen_op_vfp_uitos();
4648
                            break;
4649
                        case 62: /* VCVT.S32.F32 */
4650
                            gen_op_vfp_tosizs();
4651
                            break;
4652
                        case 63: /* VCVT.U32.F32 */
4653
                            gen_op_vfp_touizs();
4654
                            break;
4655
                        default:
4656
                            /* Reserved: 21, 29, 39-56 */
4657
                            return 1;
4658
                        }
4659
                        if (op == 30 || op == 31 || op >= 58) {
4660
                            gen_op_vfp_setreg_F0s(neon_reg_offset(rm, pass));
4661
                        } else {
4662
                            NEON_SET_REG(T0, rd, pass);
4663
                        }
4664
                    }
4665
                    break;
4666
                }
4667
            } else if ((insn & (1 << 10)) == 0) {
4668
                /* VTBL, VTBX.  */
4669
                n = (insn >> 5) & 0x18;
4670
                NEON_GET_REG(T1, rm, 0);
4671
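                /* T0 is the fall-back value for out-of-range indexes:
                   the old destination for VTBX (bit 6 set), zero for VTBL.  */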
                if (insn & (1 << 6)) {
4672
                    NEON_GET_REG(T0, rd, 0);
4673
                } else {
4674
                    gen_op_movl_T0_im(0);
4675
                }
4676
                gen_op_neon_tbl(rn, n);
4677
                gen_op_movl_T2_T0();
4678
                NEON_GET_REG(T1, rm, 1);
4679
                if (insn & (1 << 6)) {
4680
                    NEON_GET_REG(T0, rd, 1);
4681
                } else {
4682
                    gen_op_movl_T0_im(0);
4683
                }
4684
                gen_op_neon_tbl(rn, n);
4685
                NEON_SET_REG(T2, rd, 0);
4686
                NEON_SET_REG(T0, rd, 1);
4687
            } else if ((insn & 0x380) == 0) {
4688
                /* VDUP */
4689
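                /* Bits [19:16] give the scalar size and index: bit 16 set
                   selects a byte, bit 17 a halfword, otherwise a word.  */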
                if (insn & (1 << 19)) {
4690
                    NEON_GET_REG(T0, rm, 1);
4691
                } else {
4692
                    NEON_GET_REG(T0, rm, 0);
4693
                }
4694
                if (insn & (1 << 16)) {
4695
                    gen_op_neon_dup_u8(((insn >> 17) & 3) * 8);
4696
                } else if (insn & (1 << 17)) {
4697
                    if ((insn >> 18) & 1)
4698
                        gen_op_neon_dup_high16();
4699
                    else
4700
                        gen_op_neon_dup_low16();
4701
                }
4702
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
4703
                    NEON_SET_REG(T0, rd, pass);
4704
                }
4705
            } else {
4706
                return 1;
4707
            }
4708
        }
4709
    }
4710
    return 0;
4711
}
4712

    
4713
static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
4714
{
4715
    int cpnum;
4716

    
4717
    cpnum = (insn >> 8) & 0xf;
4718
    if (arm_feature(env, ARM_FEATURE_XSCALE)
4719
            && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
4720
        return 1;
4721

    
4722
    switch (cpnum) {
4723
      case 0:
4724
      case 1:
4725
        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
4726
            return disas_iwmmxt_insn(env, s, insn);
4727
        } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
4728
            return disas_dsp_insn(env, s, insn);
4729
        }
4730
        return 1;
4731
    case 10:
4732
    case 11:
4733
        return disas_vfp_insn (env, s, insn);
4734
    case 15:
4735
        return disas_cp15_insn (env, s, insn);
4736
    default:
4737
        /* Unknown coprocessor.  See if the board has hooked it.  */
4738
        return disas_cp_insn (env, s, insn);
4739
    }
4740
}
4741

    
4742
static void disas_arm_insn(CPUState * env, DisasContext *s)
4743
{
4744
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
4745
    TCGv tmp;
4746

    
4747
    insn = ldl_code(s->pc);
4748
    s->pc += 4;
4749

    
4750
    /* M variants do not implement ARM mode.  */
4751
    if (IS_M(env))
4752
        goto illegal_op;
4753
    cond = insn >> 28;
4754
    if (cond == 0xf){
4755
        /* Unconditional instructions.  */
4756
        if (((insn >> 25) & 7) == 1) {
4757
            /* NEON Data processing.  */
4758
            if (!arm_feature(env, ARM_FEATURE_NEON))
4759
                goto illegal_op;
4760

    
4761
            if (disas_neon_data_insn(env, s, insn))
4762
                goto illegal_op;
4763
            return;
4764
        }
4765
        if ((insn & 0x0f100000) == 0x04000000) {
4766
            /* NEON load/store.  */
4767
            if (!arm_feature(env, ARM_FEATURE_NEON))
4768
                goto illegal_op;
4769

    
4770
            if (disas_neon_ls_insn(env, s, insn))
4771
                goto illegal_op;
4772
            return;
4773
        }
4774
        if ((insn & 0x0d70f000) == 0x0550f000)
4775
            return; /* PLD */
4776
        else if ((insn & 0x0ffffdff) == 0x01010000) {
4777
            ARCH(6);
4778
            /* setend */
4779
            if (insn & (1 << 9)) {
4780
                /* BE8 mode not implemented.  */
4781
                goto illegal_op;
4782
            }
4783
            return;
4784
        } else if ((insn & 0x0fffff00) == 0x057ff000) {
4785
            switch ((insn >> 4) & 0xf) {
4786
            case 1: /* clrex */
4787
                ARCH(6K);
4788
                gen_op_clrex();
4789
                return;
4790
            case 4: /* dsb */
4791
            case 5: /* dmb */
4792
            case 6: /* isb */
4793
                ARCH(7);
4794
                /* We don't emulate caches so these are a no-op.  */
4795
                return;
4796
            default:
4797
                goto illegal_op;
4798
            }
4799
        } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
4800
            /* srs */
4801
            uint32_t offset;
4802
            if (IS_USER(s))
4803
                goto illegal_op;
4804
            ARCH(6);
4805
            op1 = (insn & 0x1f);
4806
            if (op1 == (env->uncached_cpsr & CPSR_M)) {
4807
                gen_movl_T1_reg(s, 13);
4808
            } else {
4809
                gen_op_movl_T1_r13_banked(op1);
4810
            }
4811
            i = (insn >> 23) & 3;
4812
            switch (i) {
4813
            case 0: offset = -4; break; /* DA */
4814
            case 1: offset = -8; break; /* DB */
4815
            case 2: offset = 0; break; /* IA */
4816
            case 3: offset = 4; break; /* IB */
4817
            default: abort();
4818
            }
4819
            if (offset)
4820
                gen_op_addl_T1_im(offset);
4821
            gen_movl_T0_reg(s, 14);
4822
            gen_ldst(stl, s);
4823
            gen_op_movl_T0_cpsr();
4824
            gen_op_addl_T1_im(4);
4825
            gen_ldst(stl, s);
4826
            if (insn & (1 << 21)) {
4827
                /* Base writeback.  */
4828
                switch (i) {
4829
                case 0: offset = -8; break;
4830
                case 1: offset = -4; break;
4831
                case 2: offset = 4; break;
4832
                case 3: offset = 0; break;
4833
                default: abort();
4834
                }
4835
                if (offset)
4836
                    gen_op_addl_T1_im(offset);
4837
                if (op1 == (env->uncached_cpsr & CPSR_M)) {
4838
                    gen_movl_reg_T1(s, 13);
4839
                } else {
4840
                    gen_op_movl_r13_T1_banked(op1);
4841
                }
4842
            }
4843
        } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
4844
            /* rfe */
4845
            uint32_t offset;
4846
            if (IS_USER(s))
4847
                goto illegal_op;
4848
            ARCH(6);
4849
            rn = (insn >> 16) & 0xf;
4850
            gen_movl_T1_reg(s, rn);
4851
            i = (insn >> 23) & 3;
4852
            switch (i) {
4853
            case 0: offset = 0; break; /* DA */
4854
            case 1: offset = -4; break; /* DB */
4855
            case 2: offset = 4; break; /* IA */
4856
            case 3: offset = 8; break; /* IB */
4857
            default: abort();
4858
            }
4859
            if (offset)
4860
                gen_op_addl_T1_im(offset);
4861
            /* Load CPSR into T2 and PC into T0.  */
4862
            gen_ldst(ldl, s);
4863
            gen_op_movl_T2_T0();
4864
            gen_op_addl_T1_im(-4);
4865
            gen_ldst(ldl, s);
4866
            if (insn & (1 << 21)) {
4867
                /* Base writeback.  */
4868
                switch (i) {
4869
                case 0: offset = -4; break;
4870
                case 1: offset = 0; break;
4871
                case 2: offset = 8; break;
4872
                case 3: offset = 4; break;
4873
                default: abort();
4874
                }
4875
                if (offset)
4876
                    gen_op_addl_T1_im(offset);
4877
                gen_movl_reg_T1(s, rn);
4878
            }
4879
            gen_rfe(s);
4880
        } else if ((insn & 0x0e000000) == 0x0a000000) {
4881
            /* branch link and change to thumb (blx <offset>) */
4882
            int32_t offset;
4883

    
4884
            val = (uint32_t)s->pc;
4885
            gen_op_movl_T0_im(val);
4886
            gen_movl_reg_T0(s, 14);
4887
            /* Sign-extend the 24-bit offset */
4888
            offset = (((int32_t)insn) << 8) >> 8;
4889
            /* offset * 4 + bit24 * 2 + (thumb bit) */
4890
            val += (offset << 2) | ((insn >> 23) & 2) | 1;
4891
            /* pipeline offset */
4892
            val += 4;
4893
            gen_op_movl_T0_im(val);
4894
            gen_bx(s);
4895
            return;
4896
        } else if ((insn & 0x0e000f00) == 0x0c000100) {
4897
            if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
4898
                /* iWMMXt register transfer.  */
4899
                if (env->cp15.c15_cpar & (1 << 1))
4900
                    if (!disas_iwmmxt_insn(env, s, insn))
4901
                        return;
4902
            }
4903
        } else if ((insn & 0x0fe00000) == 0x0c400000) {
4904
            /* Coprocessor double register transfer.  */
4905
        } else if ((insn & 0x0f000010) == 0x0e000010) {
4906
            /* Additional coprocessor register transfer.  */
4907
        } else if ((insn & 0x0ff10010) == 0x01000000) {
4908
            uint32_t mask;
4909
            uint32_t val;
4910
            /* cps (privileged) */
4911
            if (IS_USER(s))
4912
                return;
4913
            mask = val = 0;
4914
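            /* Bit 19 enables changing A/I/F (bit 18 selects set or clear);
               bit 17 enables a mode change to the mode in bits [4:0].  */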
            if (insn & (1 << 19)) {
4915
                if (insn & (1 << 8))
4916
                    mask |= CPSR_A;
4917
                if (insn & (1 << 7))
4918
                    mask |= CPSR_I;
4919
                if (insn & (1 << 6))
4920
                    mask |= CPSR_F;
4921
                if (insn & (1 << 18))
4922
                    val |= mask;
4923
            }
4924
            if (insn & (1 << 17)) {
4925
                mask |= CPSR_M;
4926
                val |= (insn & 0x1f);
4927
            }
4928
            if (mask) {
4929
                gen_op_movl_T0_im(val);
4930
                gen_set_psr_T0(s, mask, 0);
4931
            }
4932
            return;
4933
        }
4934
        goto illegal_op;
4935
    }
4936
    if (cond != 0xe) {
4937
        /* if not always execute, we generate a conditional jump to
4938
           next instruction */
4939
        s->condlabel = gen_new_label();
4940
        gen_test_cc[cond ^ 1](s->condlabel);
4941
        s->condjmp = 1;
4942
    }
4943
    if ((insn & 0x0f900000) == 0x03000000) {
4944
        if ((insn & (1 << 21)) == 0) {
4945
            ARCH(6T2);
4946
            rd = (insn >> 12) & 0xf;
4947
            val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
4948
            if ((insn & (1 << 22)) == 0) {
4949
                /* MOVW */
4950
                gen_op_movl_T0_im(val);
4951
            } else {
4952
                /* MOVT */
4953
                gen_movl_T0_reg(s, rd);
4954
                gen_op_movl_T1_im(0xffff);
4955
                gen_op_andl_T0_T1();
4956
                gen_op_movl_T1_im(val << 16);
4957
                gen_op_orl_T0_T1();
4958
            }
4959
            gen_movl_reg_T0(s, rd);
4960
        } else {
4961
            if (((insn >> 12) & 0xf) != 0xf)
4962
                goto illegal_op;
4963
            if (((insn >> 16) & 0xf) == 0) {
4964
                gen_nop_hint(s, insn & 0xff);
4965
            } else {
4966
                /* CPSR = immediate */
4967
                val = insn & 0xff;
4968
                shift = ((insn >> 8) & 0xf) * 2;
4969
                if (shift)
4970
                    val = (val >> shift) | (val << (32 - shift));
4971
                gen_op_movl_T0_im(val);
4972
                i = ((insn & (1 << 22)) != 0);
4973
                if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
4974
                    goto illegal_op;
4975
            }
4976
        }
4977
    } else if ((insn & 0x0f900000) == 0x01000000
4978
               && (insn & 0x00000090) != 0x00000090) {
4979
        /* miscellaneous instructions */
4980
        op1 = (insn >> 21) & 3;
4981
        sh = (insn >> 4) & 0xf;
4982
        rm = insn & 0xf;
4983
        switch (sh) {
4984
        case 0x0: /* move program status register */
4985
            if (op1 & 1) {
4986
                /* PSR = reg */
4987
                gen_movl_T0_reg(s, rm);
4988
                i = ((op1 & 2) != 0);
4989
                if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
4990
                    goto illegal_op;
4991
            } else {
4992
                /* reg = PSR */
4993
                rd = (insn >> 12) & 0xf;
4994
                if (op1 & 2) {
4995
                    if (IS_USER(s))
4996
                        goto illegal_op;
4997
                    gen_op_movl_T0_spsr();
4998
                } else {
4999
                    gen_op_movl_T0_cpsr();
5000
                }
5001
                gen_movl_reg_T0(s, rd);
5002
            }
5003
            break;
5004
        case 0x1:
5005
            if (op1 == 1) {
5006
                /* branch/exchange thumb (bx).  */
5007
                gen_movl_T0_reg(s, rm);
5008
                gen_bx(s);
5009
            } else if (op1 == 3) {
5010
                /* clz */
5011
                rd = (insn >> 12) & 0xf;
5012
                gen_movl_T0_reg(s, rm);
5013
                gen_op_clz_T0();
5014
                gen_movl_reg_T0(s, rd);
5015
            } else {
5016
                goto illegal_op;
5017
            }
5018
            break;
5019
        case 0x2:
5020
            if (op1 == 1) {
5021
                ARCH(5J); /* bxj */
5022
                /* Trivial implementation equivalent to bx.  */
5023
                gen_movl_T0_reg(s, rm);
5024
                gen_bx(s);
5025
            } else {
5026
                goto illegal_op;
5027
            }
5028
            break;
5029
        case 0x3:
5030
            if (op1 != 1)
5031
              goto illegal_op;
5032

    
5033
            /* branch link/exchange thumb (blx) */
5034
            val = (uint32_t)s->pc;
5035
            gen_op_movl_T1_im(val);
5036
            gen_movl_T0_reg(s, rm);
5037
            gen_movl_reg_T1(s, 14);
5038
            gen_bx(s);
5039
            break;
5040
        case 0x5: /* saturating add/subtract */
5041
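            /* QADD/QSUB/QDADD/QDSUB: bit 22 doubles Rn (with saturation)
               before the operation, bit 21 selects subtract.  */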
            rd = (insn >> 12) & 0xf;
5042
            rn = (insn >> 16) & 0xf;
5043
            gen_movl_T0_reg(s, rm);
5044
            gen_movl_T1_reg(s, rn);
5045
            if (op1 & 2)
5046
                gen_op_double_T1_saturate();
5047
            if (op1 & 1)
5048
                gen_op_subl_T0_T1_saturate();
5049
            else
5050
                gen_op_addl_T0_T1_saturate();
5051
            gen_movl_reg_T0(s, rd);
5052
            break;
5053
        case 7: /* bkpt */
5054
            gen_set_condexec(s);
5055
            gen_op_movl_T0_im((long)s->pc - 4);
5056
            gen_set_pc_T0();
5057
            gen_op_bkpt();
5058
            s->is_jmp = DISAS_JUMP;
5059
            break;
5060
        case 0x8: /* signed multiply */
5061
        case 0xa:
5062
        case 0xc:
5063
        case 0xe:
5064
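            /* op1: 0 = SMLAxy, 1 = SMULWy/SMLAWy, 2 = SMLALxy, 3 = SMULxy.  */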
            rs = (insn >> 8) & 0xf;
5065
            rn = (insn >> 12) & 0xf;
5066
            rd = (insn >> 16) & 0xf;
5067
            if (op1 == 1) {
5068
                /* (32 * 16) >> 16 */
5069
                gen_movl_T0_reg(s, rm);
5070
                gen_movl_T1_reg(s, rs);
5071
                if (sh & 4)
5072
                    gen_op_sarl_T1_im(16);
5073
                else
5074
                    gen_sxth(cpu_T[1]);
5075
                gen_op_imulw_T0_T1();
5076
                if ((sh & 2) == 0) {
5077
                    gen_movl_T1_reg(s, rn);
5078
                    gen_op_addl_T0_T1_setq();
5079
                }
5080
                gen_movl_reg_T0(s, rd);
5081
            } else {
5082
                /* 16 * 16 */
5083
                gen_movl_T0_reg(s, rm);
5084
                gen_movl_T1_reg(s, rs);
5085
                gen_mulxy(sh & 2, sh & 4);
5086
                if (op1 == 2) {
5087
                    gen_op_signbit_T1_T0();
5088
                    gen_op_addq_T0_T1(rn, rd);
5089
                    gen_movl_reg_T0(s, rn);
5090
                    gen_movl_reg_T1(s, rd);
5091
                } else {
5092
                    if (op1 == 0) {
5093
                        gen_movl_T1_reg(s, rn);
5094
                        gen_op_addl_T0_T1_setq();
5095
                    }
5096
                    gen_movl_reg_T0(s, rd);
5097
                }
5098
            }
5099
            break;
5100
        default:
5101
            goto illegal_op;
5102
        }
5103
    } else if (((insn & 0x0e000000) == 0 &&
5104
                (insn & 0x00000090) != 0x90) ||
5105
               ((insn & 0x0e000000) == (1 << 25))) {
5106
        int set_cc, logic_cc, shiftop;
5107

    
5108
        op1 = (insn >> 21) & 0xf;
5109
        set_cc = (insn >> 20) & 1;
5110
        logic_cc = table_logic_cc[op1] & set_cc;
5111

    
5112
        /* data processing instruction */
5113
        if (insn & (1 << 25)) {
5114
            /* immediate operand */
5115
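            /* The 8-bit immediate is rotated right by twice the 4-bit
               rotate field.  */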
            val = insn & 0xff;
5116
            shift = ((insn >> 8) & 0xf) * 2;
5117
            if (shift)
5118
                val = (val >> shift) | (val << (32 - shift));
5119
            gen_op_movl_T1_im(val);
5120
            if (logic_cc && shift)
5121
                gen_set_CF_bit31(cpu_T[1]);
5122
        } else {
5123
            /* register */
5124
            rm = (insn) & 0xf;
5125
            gen_movl_T1_reg(s, rm);
5126
            shiftop = (insn >> 5) & 3;
5127
            if (!(insn & (1 << 4))) {
5128
                shift = (insn >> 7) & 0x1f;
5129
                if (logic_cc) {
5130
                    if (shift != 0) {
5131
                        gen_shift_T1_im_cc[shiftop](shift);
5132
                    } else if (shiftop != 0) {
5133
                        gen_shift_T1_0_cc[shiftop]();
5134
                    }
5135
                } else {
5136
                    gen_arm_shift_im(cpu_T[1], shiftop, shift);
5137
                }
5138
            } else {
5139
                rs = (insn >> 8) & 0xf;
5140
                gen_movl_T0_reg(s, rs);
5141
                if (logic_cc) {
5142
                    gen_shift_T1_T0_cc[shiftop]();
5143
                } else {
5144
                    gen_shift_T1_T0[shiftop]();
5145
                }
5146
            }
5147
        }
5148
        if (op1 != 0x0f && op1 != 0x0d) {
5149
            rn = (insn >> 16) & 0xf;
5150
            gen_movl_T0_reg(s, rn);
5151
        }
5152
        rd = (insn >> 12) & 0xf;
5153
        switch(op1) {
5154
        case 0x00:
5155
            gen_op_andl_T0_T1();
5156
            gen_movl_reg_T0(s, rd);
5157
            if (logic_cc)
5158
                gen_op_logic_T0_cc();
5159
            break;
5160
        case 0x01:
5161
            gen_op_xorl_T0_T1();
5162
            gen_movl_reg_T0(s, rd);
5163
            if (logic_cc)
5164
                gen_op_logic_T0_cc();
5165
            break;
5166
        case 0x02:
5167
            if (set_cc && rd == 15) {
5168
                /* SUBS r15, ... is used for exception return.  */
5169
                if (IS_USER(s))
5170
                    goto illegal_op;
5171
                gen_op_subl_T0_T1_cc();
5172
                gen_exception_return(s);
5173
            } else {
5174
                if (set_cc)
5175
                    gen_op_subl_T0_T1_cc();
5176
                else
5177
                    gen_op_subl_T0_T1();
5178
                gen_movl_reg_T0(s, rd);
5179
            }
5180
            break;
5181
        case 0x03:
5182
            if (set_cc)
5183
                gen_op_rsbl_T0_T1_cc();
5184
            else
5185
                gen_op_rsbl_T0_T1();
5186
            gen_movl_reg_T0(s, rd);
5187
            break;
5188
        case 0x04:
5189
            if (set_cc)
5190
                gen_op_addl_T0_T1_cc();
5191
            else
5192
                gen_op_addl_T0_T1();
5193
            gen_movl_reg_T0(s, rd);
5194
            break;
5195
        case 0x05:
5196
            if (set_cc)
5197
                gen_op_adcl_T0_T1_cc();
5198
            else
5199
                gen_adc_T0_T1();
5200
            gen_movl_reg_T0(s, rd);
5201
            break;
5202
        case 0x06:
5203
            if (set_cc)
5204
                gen_op_sbcl_T0_T1_cc();
5205
            else
5206
                gen_op_sbcl_T0_T1();
5207
            gen_movl_reg_T0(s, rd);
5208
            break;
5209
        case 0x07:
5210
            if (set_cc)
5211
                gen_op_rscl_T0_T1_cc();
5212
            else
5213
                gen_op_rscl_T0_T1();
5214
            gen_movl_reg_T0(s, rd);
5215
            break;
5216
        case 0x08:
5217
            if (set_cc) {
5218
                gen_op_andl_T0_T1();
5219
                gen_op_logic_T0_cc();
5220
            }
5221
            break;
5222
        case 0x09:
5223
            if (set_cc) {
5224
                gen_op_xorl_T0_T1();
5225
                gen_op_logic_T0_cc();
5226
            }
5227
            break;
5228
        case 0x0a:
5229
            if (set_cc) {
5230
                gen_op_subl_T0_T1_cc();
5231
            }
5232
            break;
5233
        case 0x0b:
5234
            if (set_cc) {
5235
                gen_op_addl_T0_T1_cc();
5236
            }
5237
            break;
5238
        case 0x0c:
5239
            gen_op_orl_T0_T1();
5240
            gen_movl_reg_T0(s, rd);
5241
            if (logic_cc)
5242
                gen_op_logic_T0_cc();
5243
            break;
5244
        case 0x0d:
5245
            if (logic_cc && rd == 15) {
5246
                /* MOVS r15, ... is used for exception return.  */
5247
                if (IS_USER(s))
5248
                    goto illegal_op;
5249
                gen_op_movl_T0_T1();
5250
                gen_exception_return(s);
5251
            } else {
5252
                gen_movl_reg_T1(s, rd);
5253
                if (logic_cc)
5254
                    gen_op_logic_T1_cc();
5255
            }
5256
            break;
5257
        case 0x0e:
5258
            gen_op_bicl_T0_T1();
5259
            gen_movl_reg_T0(s, rd);
5260
            if (logic_cc)
5261
                gen_op_logic_T0_cc();
5262
            break;
5263
        default:
5264
        case 0x0f:
5265
            gen_op_notl_T1();
5266
            gen_movl_reg_T1(s, rd);
5267
            if (logic_cc)
5268
                gen_op_logic_T1_cc();
5269
            break;
5270
        }
5271
    } else {
5272
        /* other instructions */
5273
        op1 = (insn >> 24) & 0xf;
5274
        switch(op1) {
5275
        case 0x0:
5276
        case 0x1:
5277
            /* multiplies, extra load/stores */
5278
            sh = (insn >> 5) & 3;
5279
            if (sh == 0) {
5280
                if (op1 == 0x0) {
5281
                    rd = (insn >> 16) & 0xf;
5282
                    rn = (insn >> 12) & 0xf;
5283
                    rs = (insn >> 8) & 0xf;
5284
                    rm = (insn) & 0xf;
5285
                    op1 = (insn >> 20) & 0xf;
5286
                    switch (op1) {
5287
                    case 0: case 1: case 2: case 3: case 6:
5288
                        /* 32 bit mul */
5289
                        gen_movl_T0_reg(s, rs);
5290
                        gen_movl_T1_reg(s, rm);
5291
                        gen_op_mul_T0_T1();
5292
                        if (insn & (1 << 22)) {
5293
                            /* Subtract (mls) */
5294
                            ARCH(6T2);
5295
                            gen_movl_T1_reg(s, rn);
5296
                            gen_op_rsbl_T0_T1();
5297
                        } else if (insn & (1 << 21)) {
5298
                            /* Add */
5299
                            gen_movl_T1_reg(s, rn);
5300
                            gen_op_addl_T0_T1();
5301
                        }
5302
                        if (insn & (1 << 20))
5303
                            gen_op_logic_T0_cc();
5304
                        gen_movl_reg_T0(s, rd);
5305
                        break;
5306
                    default:
5307
                        /* 64 bit mul */
5308
                        gen_movl_T0_reg(s, rs);
5309
                        gen_movl_T1_reg(s, rm);
5310
                        if (insn & (1 << 22))
5311
                            gen_op_imull_T0_T1();
5312
                        else
5313
                            gen_op_mull_T0_T1();
5314
                        if (insn & (1 << 21)) /* mult accumulate */
5315
                            gen_op_addq_T0_T1(rn, rd);
5316
                        if (!(insn & (1 << 23))) { /* double accumulate */
5317
                            ARCH(6);
5318
                            gen_op_addq_lo_T0_T1(rn);
5319
                            gen_op_addq_lo_T0_T1(rd);
5320
                        }
5321
                        if (insn & (1 << 20))
5322
                            gen_op_logicq_cc();
5323
                        gen_movl_reg_T0(s, rn);
5324
                        gen_movl_reg_T1(s, rd);
5325
                        break;
5326
                    }
5327
                } else {
5328
                    rn = (insn >> 16) & 0xf;
5329
                    rd = (insn >> 12) & 0xf;
5330
                    if (insn & (1 << 23)) {
5331
                        /* load/store exclusive */
5332
                        gen_movl_T1_reg(s, rn);
5333
                        if (insn & (1 << 20)) {
5334
                            gen_ldst(ldlex, s);
5335
                        } else {
5336
                            rm = insn & 0xf;
5337
                            gen_movl_T0_reg(s, rm);
5338
                            gen_ldst(stlex, s);
5339
                        }
5340
                        gen_movl_reg_T0(s, rd);
5341
                    } else {
5342
                        /* SWP instruction */
5343
                        rm = (insn) & 0xf;
5344

    
5345
                        gen_movl_T0_reg(s, rm);
5346
                        gen_movl_T1_reg(s, rn);
5347
                        if (insn & (1 << 22)) {
5348
                            gen_ldst(swpb, s);
5349
                        } else {
5350
                            gen_ldst(swpl, s);
5351
                        }
5352
                        gen_movl_reg_T0(s, rd);
5353
                    }
5354
                }
5355
            } else {
5356
                int address_offset;
5357
                int load;
5358
                /* Misc load/store */
5359
                rn = (insn >> 16) & 0xf;
5360
                rd = (insn >> 12) & 0xf;
5361
                gen_movl_T1_reg(s, rn);
5362
                if (insn & (1 << 24))
5363
                    gen_add_datah_offset(s, insn, 0);
5364
                address_offset = 0;
5365
                if (insn & (1 << 20)) {
5366
                    /* load */
5367
                    switch(sh) {
5368
                    case 1:
5369
                        gen_ldst(lduw, s);
5370
                        break;
5371
                    case 2:
5372
                        gen_ldst(ldsb, s);
5373
                        break;
5374
                    default:
5375
                    case 3:
5376
                        gen_ldst(ldsw, s);
5377
                        break;
5378
                    }
5379
                    load = 1;
5380
                } else if (sh & 2) {
5381
                    /* doubleword */
5382
                    if (sh & 1) {
5383
                        /* store */
5384
                        gen_movl_T0_reg(s, rd);
5385
                        gen_ldst(stl, s);
5386
                        gen_op_addl_T1_im(4);
5387
                        gen_movl_T0_reg(s, rd + 1);
5388
                        gen_ldst(stl, s);
5389
                        load = 0;
5390
                    } else {
5391
                        /* load */
5392
                        gen_ldst(ldl, s);
5393
                        gen_movl_reg_T0(s, rd);
5394
                        gen_op_addl_T1_im(4);
5395
                        gen_ldst(ldl, s);
5396
                        rd++;
5397
                        load = 1;
5398
                    }
5399
                    address_offset = -4;
5400
                } else {
5401
                    /* store */
5402
                    gen_movl_T0_reg(s, rd);
5403
                    gen_ldst(stw, s);
5404
                    load = 0;
5405
                }
5406
                /* Perform base writeback before the loaded value to
5407
                   ensure correct behavior with overlapping index registers.
5408
                   ldrd with base writeback is is undefined if the
5409
                   destination and index registers overlap.  */
5410
                if (!(insn & (1 << 24))) {
5411
                    gen_add_datah_offset(s, insn, address_offset);
5412
                    gen_movl_reg_T1(s, rn);
5413
                } else if (insn & (1 << 21)) {
5414
                    if (address_offset)
5415
                        gen_op_addl_T1_im(address_offset);
5416
                    gen_movl_reg_T1(s, rn);
5417
                }
5418
                if (load) {
5419
                    /* Complete the load.  */
5420
                    gen_movl_reg_T0(s, rd);
5421
                }
5422
            }
5423
            break;
5424
        case 0x4:
5425
        case 0x5:
5426
            goto do_ldst;
5427
        case 0x6:
5428
        case 0x7:
5429
            if (insn & (1 << 4)) {
5430
                ARCH(6);
5431
                /* Armv6 Media instructions.  */
5432
                rm = insn & 0xf;
5433
                rn = (insn >> 16) & 0xf;
5434
                rd = (insn >> 12) & 0xf;
5435
                rs = (insn >> 8) & 0xf;
5436
                switch ((insn >> 23) & 3) {
5437
                case 0: /* Parallel add/subtract.  */
5438
                    op1 = (insn >> 20) & 7;
5439
                    gen_movl_T0_reg(s, rn);
5440
                    gen_movl_T1_reg(s, rm);
5441
                    sh = (insn >> 5) & 7;
5442
                    if ((op1 & 3) == 0 || sh == 5 || sh == 6)
5443
                        goto illegal_op;
5444
                    gen_arm_parallel_addsub[op1][sh]();
5445
                    gen_movl_reg_T0(s, rd);
5446
                    break;
5447
                case 1:
5448
                    if ((insn & 0x00700020) == 0) {
5449
                        /* Hafword pack.  */
5450
                        gen_movl_T0_reg(s, rn);
5451
                        gen_movl_T1_reg(s, rm);
5452
                        shift = (insn >> 7) & 0x1f;
5453
                        if (shift)
5454
                            gen_op_shll_T1_im(shift);
5455
                        if (insn & (1 << 6))
5456
                            gen_op_pkhtb_T0_T1();
5457
                        else
5458
                            gen_op_pkhbt_T0_T1();
5459
                        gen_movl_reg_T0(s, rd);
5460
                    } else if ((insn & 0x00200020) == 0x00200000) {
5461
                        /* [us]sat */
5462
                        gen_movl_T1_reg(s, rm);
5463
                        shift = (insn >> 7) & 0x1f;
5464
                        if (insn & (1 << 6)) {
5465
                            if (shift == 0)
5466
                                shift = 31;
5467
                            gen_op_sarl_T1_im(shift);
5468
                        } else {
5469
                            gen_op_shll_T1_im(shift);
5470
                        }
5471
                        sh = (insn >> 16) & 0x1f;
5472
                        if (sh != 0) {
5473
                            if (insn & (1 << 22))
5474
                                gen_op_usat_T1(sh);
5475
                            else
5476
                                gen_op_ssat_T1(sh);
5477
                        }
5478
                        gen_movl_reg_T1(s, rd);
5479
                    } else if ((insn & 0x00300fe0) == 0x00200f20) {
5480
                        /* [us]sat16 */
5481
                        gen_movl_T1_reg(s, rm);
5482
                        sh = (insn >> 16) & 0x1f;
5483
                        if (sh != 0) {
5484
                            if (insn & (1 << 22))
5485
                                gen_op_usat16_T1(sh);
5486
                            else
5487
                                gen_op_ssat16_T1(sh);
5488
                        }
5489
                        gen_movl_reg_T1(s, rd);
5490
                    } else if ((insn & 0x00700fe0) == 0x00000fa0) {
5491
                        /* Select bytes.  */
5492
                        gen_movl_T0_reg(s, rn);
5493
                        gen_movl_T1_reg(s, rm);
5494
                        gen_op_sel_T0_T1();
5495
                        gen_movl_reg_T0(s, rd);
5496
                    } else if ((insn & 0x000003e0) == 0x00000060) {
5497
                        gen_movl_T1_reg(s, rm);
5498
                        shift = (insn >> 10) & 3;
5499
                        /* ??? In many cases it's not necessary to do a
5500
                           rotate, a shift is sufficient.  */
5501
                        if (shift != 0)
5502
                            gen_op_rorl_T1_im(shift * 8);
5503
                        op1 = (insn >> 20) & 7;
5504
                        switch (op1) {
5505
                        case 0: gen_sxtb16(cpu_T[1]); break;
5506
                        case 2: gen_sxtb(cpu_T[1]);   break;
5507
                        case 3: gen_sxth(cpu_T[1]);   break;
5508
                        case 4: gen_uxtb16(cpu_T[1]); break;
5509
                        case 6: gen_uxtb(cpu_T[1]);   break;
5510
                        case 7: gen_uxth(cpu_T[1]);   break;
5511
                        default: goto illegal_op;
5512
                        }
5513
                        if (rn != 15) {
5514
                            tmp = load_reg(s, rn);
5515
                            if ((op1 & 3) == 0) {
5516
                                gen_add16(cpu_T[1], tmp);
5517
                            } else {
5518
                                tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
5519
                                dead_tmp(tmp);
5520
                            }
5521
                        }
5522
                        gen_movl_reg_T1(s, rd);
5523
                    } else if ((insn & 0x003f0f60) == 0x003f0f20) {
5524
                        /* rev */
5525
                        gen_movl_T0_reg(s, rm);
5526
                        if (insn & (1 << 22)) {
5527
                            if (insn & (1 << 7)) {
5528
                                gen_op_revsh_T0();
5529
                            } else {
5530
                                ARCH(6T2);
5531
                                gen_op_rbit_T0();
5532
                            }
5533
                        } else {
5534
                            if (insn & (1 << 7))
5535
                                gen_op_rev16_T0();
5536
                            else
5537
                                gen_op_rev_T0();
5538
                        }
5539
                        gen_movl_reg_T0(s, rd);
5540
                    } else {
5541
                        goto illegal_op;
5542
                    }
5543
                    break;
5544
                case 2: /* Multiplies (Type 3).  */
5545
                    gen_movl_T0_reg(s, rm);
5546
                    gen_movl_T1_reg(s, rs);
5547
                    if (insn & (1 << 20)) {
5548
                        /* Signed multiply most significant [accumulate].  */
5549
                        gen_op_imull_T0_T1();
5550
                        if (insn & (1 << 5))
5551
                            gen_op_roundqd_T0_T1();
5552
                        else
5553
                            gen_op_movl_T0_T1();
5554
                        if (rn != 15) {
5555
                            gen_movl_T1_reg(s, rn);
5556
                            if (insn & (1 << 6)) {
5557
                                gen_op_addl_T0_T1();
5558
                            } else {
5559
                                gen_op_rsbl_T0_T1();
5560
                            }
5561
                        }
5562
                        gen_movl_reg_T0(s, rd);
5563
                    } else {
5564
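                        /* Bit 5 is the X flag: swap the halfwords of Rs.  */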
                        if (insn & (1 << 5))
5565
                            gen_op_swap_half_T1();
5566
                        gen_op_mul_dual_T0_T1();
5567
                        if (insn & (1 << 22)) {
5568
                            if (insn & (1 << 6)) {
5569
                                /* smlald */
5570
                                gen_op_addq_T0_T1_dual(rn, rd);
5571
                            } else {
5572
                                /* smlsld */
5573
                                gen_op_subq_T0_T1_dual(rn, rd);
5574
                            }
5575
                        } else {
5576
                            /* This addition cannot overflow.  */
5577
                            if (insn & (1 << 6)) {
5578
                                /* sm[ul]sd */
5579
                                gen_op_subl_T0_T1();
5580
                            } else {
5581
                                /* sm[ul]ad */
5582
                                gen_op_addl_T0_T1();
5583
                            }
5584
                            if (rn != 15)
5585
                              {
5586
                                gen_movl_T1_reg(s, rn);
5587
                                gen_op_addl_T0_T1_setq();
5588
                              }
5589
                            gen_movl_reg_T0(s, rd);
5590
                        }
5591
                    }
5592
                    break;
5593
                case 3:
5594
                    op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
5595
                    switch (op1) {
5596
                    case 0: /* Unsigned sum of absolute differences.  */
5597
                            goto illegal_op;
5598
                        gen_movl_T0_reg(s, rm);
5599
                        gen_movl_T1_reg(s, rs);
5600
                        gen_op_usad8_T0_T1();
5601
                        if (rn != 15) {
5602
                            gen_movl_T1_reg(s, rn);
5603
                            gen_op_addl_T0_T1();
5604
                        }
5605
                        gen_movl_reg_T0(s, rd);
5606
                        break;
5607
                    case 0x20: case 0x24: case 0x28: case 0x2c:
5608
                        /* Bitfield insert/clear.  */
5609
                        ARCH(6T2);
5610
                        shift = (insn >> 7) & 0x1f;
5611
                        i = (insn >> 16) & 0x1f;
5612
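                        /* i becomes the field width (msb + 1 - lsb).  */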
                        i = i + 1 - shift;
5613
                        if (rm == 15) {
5614
                            gen_op_movl_T1_im(0);
5615
                        } else {
5616
                            gen_movl_T1_reg(s, rm);
5617
                        }
5618
                        if (i != 32) {
5619
                            gen_movl_T0_reg(s, rd);
5620
                            gen_op_bfi_T1_T0(shift, ((1u << i) - 1) << shift);
5621
                        }
5622
                        gen_movl_reg_T1(s, rd);
5623
                        break;
5624
                    case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
5625
                    case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
5626
                        gen_movl_T1_reg(s, rm);
5627
                        shift = (insn >> 7) & 0x1f;
5628
                        i = ((insn >> 16) & 0x1f) + 1;
5629
                        if (shift + i > 32)
5630
                            goto illegal_op;
5631
                        if (i < 32) {
5632
                            if (op1 & 0x20) {
5633
                                gen_op_ubfx_T1(shift, (1u << i) - 1);
5634
                            } else {
5635
                                gen_op_sbfx_T1(shift, i);
5636
                            }
5637
                        }
5638
                        gen_movl_reg_T1(s, rd);
5639
                        break;
5640
                    default:
5641
                        goto illegal_op;
5642
                    }
5643
                    break;
5644
                }
5645
                break;
5646
            }
5647
        do_ldst:
5648
            /* Check for undefined extension instructions
5649
             * per the ARM Bible IE:
5650
             * xxxx 0111 1111 xxxx  xxxx xxxx 1111 xxxx
5651
             */
5652
            sh = (0xf << 20) | (0xf << 4);
5653
            if (op1 == 0x7 && ((insn & sh) == sh))
5654
            {
5655
                goto illegal_op;
5656
            }
5657
            /* load/store byte/word */
5658
            rn = (insn >> 16) & 0xf;
5659
            rd = (insn >> 12) & 0xf;
5660
            gen_movl_T1_reg(s, rn);
5661
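            /* A user-mode access is forced either in user mode or for the
               T variants (post-indexed with the W bit set).  */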
            i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
5662
            if (insn & (1 << 24))
5663
                gen_add_data_offset(s, insn);
5664
            if (insn & (1 << 20)) {
5665
                /* load */
5666
                s->is_mem = 1;
5667
#if defined(CONFIG_USER_ONLY)
5668
                if (insn & (1 << 22))
5669
                    gen_op_ldub_raw();
5670
                else
5671
                    gen_op_ldl_raw();
5672
#else
5673
                if (insn & (1 << 22)) {
5674
                    if (i)
5675
                        gen_op_ldub_user();
5676
                    else
5677
                        gen_op_ldub_kernel();
5678
                } else {
5679
                    if (i)
5680
                        gen_op_ldl_user();
5681
                    else
5682
                        gen_op_ldl_kernel();
5683
                }
5684
#endif
5685
            } else {
5686
                /* store */
5687
                gen_movl_T0_reg(s, rd);
5688
#if defined(CONFIG_USER_ONLY)
5689
                if (insn & (1 << 22))
5690
                    gen_op_stb_raw();
5691
                else
5692
                    gen_op_stl_raw();
5693
#else
5694
                if (insn & (1 << 22)) {
5695
                    if (i)
5696
                        gen_op_stb_user();
5697
                    else
5698
                        gen_op_stb_kernel();
5699
                } else {
5700
                    if (i)
5701
                        gen_op_stl_user();
5702
                    else
5703
                        gen_op_stl_kernel();
5704
                }
5705
#endif
5706
            }
5707
            if (!(insn & (1 << 24))) {
5708
                gen_add_data_offset(s, insn);
5709
                gen_movl_reg_T1(s, rn);
5710
            } else if (insn & (1 << 21)) {
5711
                gen_movl_reg_T1(s, rn);
5712
            }
5713
            if (insn & (1 << 20)) {
5714
                /* Complete the load.  */
5715
                if (rd == 15)
5716
                    gen_bx(s);
5717
                else
5718
                    gen_movl_reg_T0(s, rd);
5719
            }
5720
            break;
5721
        case 0x08:
5722
        case 0x09:
5723
            {
5724
                int j, n, user, loaded_base;
5725
                /* load/store multiple words */
5726
                /* XXX: store correct base if write back */
5727
                user = 0;
5728
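                /* S bit: transfer the user-bank registers unless the PC is
                   in the register list.  */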
                if (insn & (1 << 22)) {
5729
                    if (IS_USER(s))
5730
                        goto illegal_op; /* only usable in supervisor mode */
5731

    
5732
                    if ((insn & (1 << 15)) == 0)
5733
                        user = 1;
5734
                }
5735
                rn = (insn >> 16) & 0xf;
5736
                gen_movl_T1_reg(s, rn);
5737

    
5738
                /* compute total size */
5739
                loaded_base = 0;
5740
                n = 0;
5741
                for(i=0;i<16;i++) {
5742
                    if (insn & (1 << i))
5743
                        n++;
5744
                }
5745
                /* XXX: test invalid n == 0 case ? */
5746
                if (insn & (1 << 23)) {
5747
                    if (insn & (1 << 24)) {
5748
                        /* pre increment */
5749
                        gen_op_addl_T1_im(4);
5750
                    } else {
5751
                        /* post increment */
5752
                    }
5753
                } else {
5754
                    if (insn & (1 << 24)) {
5755
                        /* pre decrement */
5756
                        gen_op_addl_T1_im(-(n * 4));
5757
                    } else {
5758
                        /* post decrement */
5759
                        if (n != 1)
5760
                            gen_op_addl_T1_im(-((n - 1) * 4));
5761
                    }
5762
                }
5763
                j = 0;
5764
                for(i=0;i<16;i++) {
5765
                    if (insn & (1 << i)) {
5766
                        if (insn & (1 << 20)) {
5767
                            /* load */
5768
                            gen_ldst(ldl, s);
5769
                            if (i == 15) {
5770
                                gen_bx(s);
5771
                            } else if (user) {
5772
                                gen_op_movl_user_T0(i);
5773
                            } else if (i == rn) {
5774
                                gen_op_movl_T2_T0();
5775
                                loaded_base = 1;
5776
                            } else {
5777
                                gen_movl_reg_T0(s, i);
5778
                            }
5779
                        } else {
5780
                            /* store */
5781
                            if (i == 15) {
5782
                                /* special case: r15 = PC + 8 */
5783
                                val = (long)s->pc + 4;
5784
                                gen_op_movl_T0_im(val);
5785
                            } else if (user) {
5786
                                gen_op_movl_T0_user(i);
5787
                            } else {
5788
                                gen_movl_T0_reg(s, i);
5789
                            }
5790
                            gen_ldst(stl, s);
5791
                        }
5792
                        j++;
5793
                        /* no need to add after the last transfer */
5794
                        if (j != n)
5795
                            gen_op_addl_T1_im(4);
5796
                    }
5797
                }
5798
                if (insn & (1 << 21)) {
5799
                    /* write back */
5800
                    if (insn & (1 << 23)) {
5801
                        if (insn & (1 << 24)) {
5802
                            /* pre increment */
5803
                        } else {
5804
                            /* post increment */
5805
                            gen_op_addl_T1_im(4);
5806
                        }
5807
                    } else {
5808
                        if (insn & (1 << 24)) {
5809
                            /* pre decrement */
5810
                            if (n != 1)
5811
                                gen_op_addl_T1_im(-((n - 1) * 4));
5812
                        } else {
5813
                            /* post decrement */
5814
                            gen_op_addl_T1_im(-(n * 4));
5815
                        }
5816
                    }
5817
                    gen_movl_reg_T1(s, rn);
5818
                }
5819
                if (loaded_base) {
5820
                    gen_op_movl_T0_T2();
5821
                    gen_movl_reg_T0(s, rn);
5822
                }
5823
                if ((insn & (1 << 22)) && !user) {
5824
                    /* Restore CPSR from SPSR.  */
5825
                    gen_op_movl_T0_spsr();
5826
                    gen_op_movl_cpsr_T0(0xffffffff);
5827
                    s->is_jmp = DISAS_UPDATE;
5828
                }
5829
            }
5830
            break;
5831
        case 0xa:
5832
        case 0xb:
5833
            {
5834
                int32_t offset;
5835

    
5836
                /* branch (and link) */
5837
                val = (int32_t)s->pc;
5838
                if (insn & (1 << 24)) {
5839
                    gen_op_movl_T0_im(val);
5840
                    gen_movl_reg_T0(s, 14);
5841
                }
5842
                offset = (((int32_t)insn << 8) >> 8);
5843
                val += (offset << 2) + 4;
5844
                gen_jmp(s, val);
5845
            }
5846
            break;
5847
        case 0xc:
5848
        case 0xd:
5849
        case 0xe:
5850
            /* Coprocessor.  */
5851
            if (disas_coproc_insn(env, s, insn))
5852
                goto illegal_op;
5853
            break;
5854
        case 0xf:
5855
            /* swi */
5856
            gen_op_movl_T0_im((long)s->pc);
5857
            gen_set_pc_T0();
5858
            s->is_jmp = DISAS_SWI;
5859
            break;
5860
        default:
5861
        illegal_op:
5862
            gen_set_condexec(s);
5863
            gen_op_movl_T0_im((long)s->pc - 4);
5864
            gen_set_pc_T0();
5865
            gen_op_undef_insn();
5866
            s->is_jmp = DISAS_JUMP;
5867
            break;
5868
        }
5869
    }
5870
}
5871

    
5872
/* Return true if this is a Thumb-2 logical op.  */
5873
static int
5874
thumb2_logic_op(int op)
5875
{
5876
    return (op < 8);
5877
}
5878

    
5879
/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
5880
   then set condition code flags based on the result of the operation.
5881
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
5882
   to the high bit of T1.
5883
   Returns zero if the opcode is valid.  */
5884

    
5885
static int
5886
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
5887
{
5888
    int logic_cc;
5889

    
5890
    logic_cc = 0;
5891
    switch (op) {
5892
    case 0: /* and */
5893
        gen_op_andl_T0_T1();
5894
        logic_cc = conds;
5895
        break;
5896
    case 1: /* bic */
5897
        gen_op_bicl_T0_T1();
5898
        logic_cc = conds;
5899
        break;
5900
    case 2: /* orr */
5901
        gen_op_orl_T0_T1();
5902
        logic_cc = conds;
5903
        break;
5904
    case 3: /* orn */
5905
        gen_op_notl_T1();
5906
        gen_op_orl_T0_T1();
5907
        logic_cc = conds;
5908
        break;
5909
    case 4: /* eor */
5910
        gen_op_xorl_T0_T1();
5911
        logic_cc = conds;
5912
        break;
5913
    case 8: /* add */
5914
        if (conds)
5915
            gen_op_addl_T0_T1_cc();
5916
        else
5917
            gen_op_addl_T0_T1();
5918
        break;
5919
    case 10: /* adc */
5920
        if (conds)
5921
            gen_op_adcl_T0_T1_cc();
5922
        else
5923
            gen_adc_T0_T1();
5924
        break;
5925
    case 11: /* sbc */
5926
        if (conds)
5927
            gen_op_sbcl_T0_T1_cc();
5928
        else
5929
            gen_op_sbcl_T0_T1();
5930
        break;
5931
    case 13: /* sub */
5932
        if (conds)
5933
            gen_op_subl_T0_T1_cc();
5934
        else
5935
            gen_op_subl_T0_T1();
5936
        break;
5937
    case 14: /* rsb */
5938
        if (conds)
5939
            gen_op_rsbl_T0_T1_cc();
5940
        else
5941
            gen_op_rsbl_T0_T1();
5942
        break;
5943
    default: /* 5, 6, 7, 9, 12, 15. */
5944
        return 1;
5945
    }
5946
    if (logic_cc) {
5947
        gen_op_logic_T0_cc();
5948
        if (shifter_out)
5949
            gen_set_CF_bit31(cpu_T[1]);
5950
    }
5951
    return 0;
5952
}
5953

    
5954
/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
5955
   is not legal.  */
5956
static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
5957
{
5958
    uint32_t insn, imm, shift, offset, addr;
5959
    uint32_t rd, rn, rm, rs;
5960
    TCGv tmp;
5961
    int op;
5962
    int shiftop;
5963
    int conds;
5964
    int logic_cc;
5965

    
5966
    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
5967
          || arm_feature (env, ARM_FEATURE_M))) {
5968
        /* Thumb-1 cores may need to tread bl and blx as a pair of
5969
           16-bit instructions to get correct prefetch abort behavior.  */
5970
        insn = insn_hw1;
5971
        if ((insn & (1 << 12)) == 0) {
5972
            /* Second half of blx.  */
5973
            offset = ((insn & 0x7ff) << 1);
5974
            gen_movl_T0_reg(s, 14);
5975
            gen_op_movl_T1_im(offset);
5976
            gen_op_addl_T0_T1();
5977
            gen_op_movl_T1_im(0xfffffffc);
5978
            gen_op_andl_T0_T1();
5979

    
5980
            addr = (uint32_t)s->pc;
5981
            gen_op_movl_T1_im(addr | 1);
5982
            gen_movl_reg_T1(s, 14);
5983
            gen_bx(s);
5984
            return 0;
5985
        }
5986
        if (insn & (1 << 11)) {
5987
            /* Second half of bl.  */
5988
            offset = ((insn & 0x7ff) << 1) | 1;
5989
            gen_movl_T0_reg(s, 14);
5990
            gen_op_movl_T1_im(offset);
5991
            gen_op_addl_T0_T1();
5992

    
5993
            addr = (uint32_t)s->pc;
5994
            gen_op_movl_T1_im(addr | 1);
5995
            gen_movl_reg_T1(s, 14);
5996
            gen_bx(s);
5997
            return 0;
5998
        }
5999
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
6000
            /* Instruction spans a page boundary.  Implement it as two
6001
               16-bit instructions in case the second half causes an
6002
               prefetch abort.  */
6003
            offset = ((int32_t)insn << 21) >> 9;
6004
            addr = s->pc + 2 + offset;
6005
            gen_op_movl_T0_im(addr);
6006
            gen_movl_reg_T0(s, 14);
6007
            return 0;
6008
        }
6009
        /* Fall through to 32-bit decode.  */
6010
    }
6011

    
6012
    insn = lduw_code(s->pc);
6013
    s->pc += 2;
6014
    insn |= (uint32_t)insn_hw1 << 16;
6015

    
6016
    if ((insn & 0xf800e800) != 0xf000e800) {
6017
        ARCH(6T2);
6018
    }
6019

    
6020
    rn = (insn >> 16) & 0xf;
6021
    rs = (insn >> 12) & 0xf;
6022
    rd = (insn >> 8) & 0xf;
6023
    rm = insn & 0xf;
6024
    switch ((insn >> 25) & 0xf) {
6025
    case 0: case 1: case 2: case 3:
6026
        /* 16-bit instructions.  Should never happen.  */
6027
        abort();
6028
    case 4:
6029
        if (insn & (1 << 22)) {
6030
            /* Other load/store, table branch.  */
6031
            if (insn & 0x01200000) {
6032
                /* Load/store doubleword.  */
6033
                if (rn == 15) {
6034
                    gen_op_movl_T1_im(s->pc & ~3);
6035
                } else {
6036
                    gen_movl_T1_reg(s, rn);
6037
                }
6038
                offset = (insn & 0xff) * 4;
6039
                if ((insn & (1 << 23)) == 0)
6040
                    offset = -offset;
6041
                if (insn & (1 << 24)) {
6042
                    gen_op_addl_T1_im(offset);
6043
                    offset = 0;
6044
                }
6045
                if (insn & (1 << 20)) {
6046
                    /* ldrd */
6047
                    gen_ldst(ldl, s);
6048
                    gen_movl_reg_T0(s, rs);
6049
                    gen_op_addl_T1_im(4);
6050
                    gen_ldst(ldl, s);
6051
                    gen_movl_reg_T0(s, rd);
6052
                } else {
6053
                    /* strd */
6054
                    gen_movl_T0_reg(s, rs);
6055
                    gen_ldst(stl, s);
6056
                    gen_op_addl_T1_im(4);
6057
                    gen_movl_T0_reg(s, rd);
6058
                    gen_ldst(stl, s);
6059
                }
6060
                if (insn & (1 << 21)) {
6061
                    /* Base writeback.  */
6062
                    if (rn == 15)
6063
                        goto illegal_op;
6064
                    gen_op_addl_T1_im(offset - 4);
6065
                    gen_movl_reg_T1(s, rn);
6066
                }
6067
            } else if ((insn & (1 << 23)) == 0) {
6068
                /* Load/store exclusive word.  */
6069
                gen_movl_T0_reg(s, rd);
6070
                gen_movl_T1_reg(s, rn);
6071
                if (insn & (1 << 20)) {
6072
                    gen_ldst(ldlex, s);
6073
                } else {
6074
                    gen_ldst(stlex, s);
6075
                }
6076
                gen_movl_reg_T0(s, rd);
6077
            } else if ((insn & (1 << 6)) == 0) {
6078
                /* Table Branch.  */
6079
                if (rn == 15) {
6080
                    gen_op_movl_T1_im(s->pc);
6081
                } else {
6082
                    gen_movl_T1_reg(s, rn);
6083
                }
6084
                tmp = load_reg(s, rm);
6085
                tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
6086
                if (insn & (1 << 4)) {
6087
                    /* tbh */
6088
                    tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
6089
                    dead_tmp(tmp);
6090
                    gen_ldst(lduw, s);
6091
                } else { /* tbb */
6092
                    dead_tmp(tmp);
6093
                    gen_ldst(ldub, s);
6094
                }
6095
                gen_op_jmp_T0_im(s->pc);
6096
                s->is_jmp = DISAS_JUMP;
6097
            } else {
6098
                /* Load/store exclusive byte/halfword/doubleword.  */
6099
                op = (insn >> 4) & 0x3;
6100
                gen_movl_T1_reg(s, rn);
6101
                if (insn & (1 << 20)) {
6102
                    switch (op) {
6103
                    case 0:
6104
                        gen_ldst(ldbex, s);
6105
                        break;
6106
                    case 1:
6107
                        gen_ldst(ldwex, s);
6108
                        break;
6109
                    case 3:
6110
                        gen_ldst(ldqex, s);
6111
                        gen_movl_reg_T1(s, rd);
6112
                        break;
6113
                    default:
6114
                        goto illegal_op;
6115
                    }
6116
                    gen_movl_reg_T0(s, rs);
6117
                } else {
6118
                    gen_movl_T0_reg(s, rs);
6119
                    switch (op) {
6120
                    case 0:
6121
                        gen_ldst(stbex, s);
6122
                        break;
6123
                    case 1:
6124
                        gen_ldst(stwex, s);
6125
                        break;
6126
                    case 3:
6127
                        gen_movl_T2_reg(s, rd);
6128
                        gen_ldst(stqex, s);
6129
                        break;
6130
                    default:
6131
                        goto illegal_op;
6132
                    }
6133
                    gen_movl_reg_T0(s, rm);
6134
                }
6135
            }
6136
        } else {
6137
            /* Load/store multiple, RFE, SRS.  */
6138
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
6139
                /* Not available in user mode.  */
6140
                if (!IS_USER(s))
6141
                    goto illegal_op;
6142
                if (insn & (1 << 20)) {
6143
                    /* rfe */
6144
                    gen_movl_T1_reg(s, rn);
6145
                    if (insn & (1 << 24)) {
6146
                        gen_op_addl_T1_im(4);
6147
                    } else {
6148
                        gen_op_addl_T1_im(-4);
6149
                    }
6150
                    /* Load CPSR into T2 and PC into T0.  */
6151
                    gen_ldst(ldl, s);
6152
                    gen_op_movl_T2_T0();
6153
                    gen_op_addl_T1_im(-4);
6154
                    gen_ldst(ldl, s);
6155
                    if (insn & (1 << 21)) {
6156
                        /* Base writeback.  */
6157
                        if (insn & (1 << 24))
6158
                            gen_op_addl_T1_im(8);
6159
                        gen_movl_reg_T1(s, rn);
6160
                    }
6161
                    gen_rfe(s);
6162
                } else {
6163
                    /* srs */
6164
                    op = (insn & 0x1f);
6165
                    if (op == (env->uncached_cpsr & CPSR_M)) {
6166
                        gen_movl_T1_reg(s, 13);
6167
                    } else {
6168
                        gen_op_movl_T1_r13_banked(op);
6169
                    }
6170
                    if ((insn & (1 << 24)) == 0) {
6171
                        gen_op_addl_T1_im(-8);
6172
                    }
6173
                    gen_movl_T0_reg(s, 14);
6174
                    gen_ldst(stl, s);
6175
                    gen_op_movl_T0_cpsr();
6176
                    gen_op_addl_T1_im(4);
6177
                    gen_ldst(stl, s);
6178
                    if (insn & (1 << 21)) {
6179
                        if ((insn & (1 << 24)) == 0) {
6180
                            gen_op_addl_T1_im(-4);
6181
                        } else {
6182
                            gen_op_addl_T1_im(4);
6183
                        }
6184
                        if (op == (env->uncached_cpsr & CPSR_M)) {
6185
                            gen_movl_reg_T1(s, 13);
6186
                        } else {
6187
                            gen_op_movl_r13_T1_banked(op);
6188
                        }
6189
                    }
6190
                }
6191
            } else {
6192
                int i;
6193
                /* Load/store multiple.  */
6194
                gen_movl_T1_reg(s, rn);
6195
                offset = 0;
6196
                for (i = 0; i < 16; i++) {
6197
                    if (insn & (1 << i))
6198
                        offset += 4;
6199
                }
6200
                if (insn & (1 << 24)) {
6201
                    gen_op_addl_T1_im(-offset);
6202
                }
6203

    
6204
                for (i = 0; i < 16; i++) {
6205
                    if ((insn & (1 << i)) == 0)
6206
                        continue;
6207
                    if (insn & (1 << 20)) {
6208
                        /* Load.  */
6209
                        gen_ldst(ldl, s);
6210
                        if (i == 15) {
6211
                            gen_bx(s);
6212
                        } else {
6213
                            gen_movl_reg_T0(s, i);
6214
                        }
6215
                    } else {
6216
                        /* Store.  */
6217
                        gen_movl_T0_reg(s, i);
6218
                        gen_ldst(stl, s);
6219
                    }
6220
                    gen_op_addl_T1_im(4);
6221
                }
6222
                if (insn & (1 << 21)) {
6223
                    /* Base register writeback.  */
6224
                    if (insn & (1 << 24)) {
6225
                        gen_op_addl_T1_im(-offset);
6226
                    }
6227
                    /* Fault if writeback register is in register list.  */
6228
                    if (insn & (1 << rn))
6229
                        goto illegal_op;
6230
                    gen_movl_reg_T1(s, rn);
6231
                }
6232
            }
6233
        }
6234
        break;
6235
    case 5: /* Data processing register constant shift.  */
6236
        if (rn == 15)
6237
            gen_op_movl_T0_im(0);
6238
        else
6239
            gen_movl_T0_reg(s, rn);
6240
        gen_movl_T1_reg(s, rm);
6241
        op = (insn >> 21) & 0xf;
6242
        shiftop = (insn >> 4) & 3;
6243
        shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6244
        conds = (insn & (1 << 20)) != 0;
6245
        logic_cc = (conds && thumb2_logic_op(op));
6246
        if (logic_cc) {
6247
            if (shift != 0) {
6248
                gen_shift_T1_im_cc[shiftop](shift);
6249
            } else if (shiftop != 0) {
6250
                gen_shift_T1_0_cc[shiftop]();
6251
            }
6252
        } else {
6253
            gen_arm_shift_im(cpu_T[1], shiftop, shift);
6254
        }
6255
        if (gen_thumb2_data_op(s, op, conds, 0))
6256
            goto illegal_op;
6257
        if (rd != 15)
6258
            gen_movl_reg_T0(s, rd);
6259
        break;
6260
    case 13: /* Misc data processing.  */
6261
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
6262
        if (op < 4 && (insn & 0xf000) != 0xf000)
6263
            goto illegal_op;
6264
        switch (op) {
6265
        case 0: /* Register controlled shift.  */
6266
            gen_movl_T0_reg(s, rm);
6267
            gen_movl_T1_reg(s, rn);
6268
            if ((insn & 0x70) != 0)
6269
                goto illegal_op;
6270
            op = (insn >> 21) & 3;
6271
            if (insn & (1 << 20)) {
6272
                gen_shift_T1_T0_cc[op]();
6273
                gen_op_logic_T1_cc();
6274
            } else {
6275
                gen_shift_T1_T0[op]();
6276
            }
6277
            gen_movl_reg_T1(s, rd);
6278
            break;
6279
        case 1: /* Sign/zero extend.  */
6280
            gen_movl_T1_reg(s, rm);
6281
            shift = (insn >> 4) & 3;
6282
            /* ??? In many cases it's not neccessary to do a
6283
               rotate, a shift is sufficient.  */
6284
            if (shift != 0)
6285
                gen_op_rorl_T1_im(shift * 8);
6286
            op = (insn >> 20) & 7;
6287
            switch (op) {
6288
            case 0: gen_sxth(cpu_T[1]);   break;
6289
            case 1: gen_uxth(cpu_T[1]);   break;
6290
            case 2: gen_sxtb16(cpu_T[1]); break;
6291
            case 3: gen_uxtb16(cpu_T[1]); break;
6292
            case 4: gen_sxtb(cpu_T[1]);   break;
6293
            case 5: gen_uxtb(cpu_T[1]);   break;
6294
            default: goto illegal_op;
6295
            }
6296
            if (rn != 15) {
6297
                tmp = load_reg(s, rn);
6298
                if ((op >> 1) == 1) {
6299
                    gen_add16(cpu_T[1], tmp);
6300
                } else {
6301
                    tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
6302
                    dead_tmp(tmp);
6303
                }
6304
            }
6305
            gen_movl_reg_T1(s, rd);
6306
            break;
6307
        case 2: /* SIMD add/subtract.  */
6308
            op = (insn >> 20) & 7;
6309
            shift = (insn >> 4) & 7;
6310
            if ((op & 3) == 3 || (shift & 3) == 3)
6311
                goto illegal_op;
6312
            gen_movl_T0_reg(s, rn);
6313
            gen_movl_T1_reg(s, rm);
6314
            gen_thumb2_parallel_addsub[op][shift]();
6315
            gen_movl_reg_T0(s, rd);
6316
            break;
6317
        case 3: /* Other data processing.  */
6318
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
6319
            if (op < 4) {
6320
                /* Saturating add/subtract.  */
6321
                gen_movl_T0_reg(s, rm);
6322
                gen_movl_T1_reg(s, rn);
6323
                if (op & 2)
6324
                    gen_op_double_T1_saturate();
6325
                if (op & 1)
6326
                    gen_op_subl_T0_T1_saturate();
6327
                else
6328
                    gen_op_addl_T0_T1_saturate();
6329
            } else {
6330
                gen_movl_T0_reg(s, rn);
6331
                switch (op) {
6332
                case 0x0a: /* rbit */
6333
                    gen_op_rbit_T0();
6334
                    break;
6335
                case 0x08: /* rev */
6336
                    gen_op_rev_T0();
6337
                    break;
6338
                case 0x09: /* rev16 */
6339
                    gen_op_rev16_T0();
6340
                    break;
6341
                case 0x0b: /* revsh */
6342
                    gen_op_revsh_T0();
6343
                    break;
6344
                case 0x10: /* sel */
6345
                    gen_movl_T1_reg(s, rm);
6346
                    gen_op_sel_T0_T1();
6347
                    break;
6348
                case 0x18: /* clz */
6349
                    gen_op_clz_T0();
6350
                    break;
6351
                default:
6352
                    goto illegal_op;
6353
                }
6354
            }
6355
            gen_movl_reg_T0(s, rd);
6356
            break;
6357
        case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
6358
            op = (insn >> 4) & 0xf;
6359
            gen_movl_T0_reg(s, rn);
6360
            gen_movl_T1_reg(s, rm);
6361
            switch ((insn >> 20) & 7) {
6362
            case 0: /* 32 x 32 -> 32 */
6363
                gen_op_mul_T0_T1();
6364
                if (rs != 15) {
6365
                    gen_movl_T1_reg(s, rs);
6366
                    if (op)
6367
                        gen_op_rsbl_T0_T1();
6368
                    else
6369
                        gen_op_addl_T0_T1();
6370
                }
6371
                gen_movl_reg_T0(s, rd);
6372
                break;
6373
            case 1: /* 16 x 16 -> 32 */
6374
                gen_mulxy(op & 2, op & 1);
6375
                if (rs != 15) {
6376
                    gen_movl_T1_reg(s, rs);
6377
                    gen_op_addl_T0_T1_setq();
6378
                }
6379
                gen_movl_reg_T0(s, rd);
6380
                break;
6381
            case 2: /* Dual multiply add.  */
6382
            case 4: /* Dual multiply subtract.  */
6383
                if (op)
6384
                    gen_op_swap_half_T1();
6385
                gen_op_mul_dual_T0_T1();
6386
                /* This addition cannot overflow.  */
6387
                if (insn & (1 << 22)) {
6388
                    gen_op_subl_T0_T1();
6389
                } else {
6390
                    gen_op_addl_T0_T1();
6391
                }
6392
                if (rs != 15)
6393
                  {
6394
                    gen_movl_T1_reg(s, rs);
6395
                    gen_op_addl_T0_T1_setq();
6396
                  }
6397
                gen_movl_reg_T0(s, rd);
6398
                break;
6399
            case 3: /* 32 * 16 -> 32msb */
6400
                if (op)
6401
                    gen_op_sarl_T1_im(16);
6402
                else
6403
                    gen_sxth(cpu_T[1]);
6404
                gen_op_imulw_T0_T1();
6405
                if (rs != 15)
6406
                  {
6407
                    gen_movl_T1_reg(s, rs);
6408
                    gen_op_addl_T0_T1_setq();
6409
                  }
6410
                gen_movl_reg_T0(s, rd);
6411
                break;
6412
            case 5: case 6: /* 32 * 32 -> 32msb */
6413
                gen_op_imull_T0_T1();
6414
                if (insn & (1 << 5))
6415
                    gen_op_roundqd_T0_T1();
6416
                else
6417
                    gen_op_movl_T0_T1();
6418
                if (rs != 15) {
6419
                    gen_movl_T1_reg(s, rs);
6420
                    if (insn & (1 << 21)) {
6421
                        gen_op_addl_T0_T1();
6422
                    } else {
6423
                        gen_op_rsbl_T0_T1();
6424
                    }
6425
                }
6426
                gen_movl_reg_T0(s, rd);
6427
                break;
6428
            case 7: /* Unsigned sum of absolute differences.  */
6429
                gen_op_usad8_T0_T1();
6430
                if (rs != 15) {
6431
                    gen_movl_T1_reg(s, rs);
6432
                    gen_op_addl_T0_T1();
6433
                }
6434
                gen_movl_reg_T0(s, rd);
6435
                break;
6436
            }
6437
            break;
6438
        case 6: case 7: /* 64-bit multiply, Divide.  */
6439
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
6440
            gen_movl_T0_reg(s, rn);
6441
            gen_movl_T1_reg(s, rm);
6442
            if ((op & 0x50) == 0x10) {
6443
                /* sdiv, udiv */
6444
                if (!arm_feature(env, ARM_FEATURE_DIV))
6445
                    goto illegal_op;
6446
                if (op & 0x20)
6447
                    gen_op_udivl_T0_T1();
6448
                else
6449
                    gen_op_sdivl_T0_T1();
6450
                gen_movl_reg_T0(s, rd);
6451
            } else if ((op & 0xe) == 0xc) {
6452
                /* Dual multiply accumulate long.  */
6453
                if (op & 1)
6454
                    gen_op_swap_half_T1();
6455
                gen_op_mul_dual_T0_T1();
6456
                if (op & 0x10) {
6457
                    gen_op_subl_T0_T1();
6458
                } else {
6459
                    gen_op_addl_T0_T1();
6460
                }
6461
                gen_op_signbit_T1_T0();
6462
                gen_op_addq_T0_T1(rs, rd);
6463
                gen_movl_reg_T0(s, rs);
6464
                gen_movl_reg_T1(s, rd);
6465
            } else {
6466
                if (op & 0x20) {
6467
                    /* Unsigned 64-bit multiply  */
6468
                    gen_op_mull_T0_T1();
6469
                } else {
6470
                    if (op & 8) {
6471
                        /* smlalxy */
6472
                        gen_mulxy(op & 2, op & 1);
6473
                        gen_op_signbit_T1_T0();
6474
                    } else {
6475
                        /* Signed 64-bit multiply  */
6476
                        gen_op_imull_T0_T1();
6477
                    }
6478
                }
6479
                if (op & 4) {
6480
                    /* umaal */
6481
                    gen_op_addq_lo_T0_T1(rs);
6482
                    gen_op_addq_lo_T0_T1(rd);
6483
                } else if (op & 0x40) {
6484
                    /* 64-bit accumulate.  */
6485
                    gen_op_addq_T0_T1(rs, rd);
6486
                }
6487
                gen_movl_reg_T0(s, rs);
6488
                gen_movl_reg_T1(s, rd);
6489
            }
6490
            break;
6491
        }
6492
        break;
6493
    case 6: case 7: case 14: case 15:
6494
        /* Coprocessor.  */
6495
        if (((insn >> 24) & 3) == 3) {
6496
            /* Translate into the equivalent ARM encoding.  */
6497
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
6498
            if (disas_neon_data_insn(env, s, insn))
6499
                goto illegal_op;
6500
        } else {
6501
            if (insn & (1 << 28))
6502
                goto illegal_op;
6503
            if (disas_coproc_insn (env, s, insn))
6504
                goto illegal_op;
6505
        }
6506
        break;
6507
    case 8: case 9: case 10: case 11:
6508
        if (insn & (1 << 15)) {
6509
            /* Branches, misc control.  */
6510
            if (insn & 0x5000) {
6511
                /* Unconditional branch.  */
6512
                /* signextend(hw1[10:0]) -> offset[:12].  */
6513
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
6514
                /* hw1[10:0] -> offset[11:1].  */
6515
                offset |= (insn & 0x7ff) << 1;
6516
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
6517
                   offset[24:22] already have the same value because of the
6518
                   sign extension above.  */
6519
                offset ^= ((~insn) & (1 << 13)) << 10;
6520
                offset ^= ((~insn) & (1 << 11)) << 11;
6521

    
6522
                addr = s->pc;
6523
                if (insn & (1 << 14)) {
6524
                    /* Branch and link.  */
6525
                    gen_op_movl_T1_im(addr | 1);
6526
                    gen_movl_reg_T1(s, 14);
6527
                }
6528

    
6529
                addr += offset;
6530
                if (insn & (1 << 12)) {
6531
                    /* b/bl */
6532
                    gen_jmp(s, addr);
6533
                } else {
6534
                    /* blx */
6535
                    addr &= ~(uint32_t)2;
6536
                    gen_op_movl_T0_im(addr);
6537
                    gen_bx(s);
6538
                }
6539
            } else if (((insn >> 23) & 7) == 7) {
6540
                /* Misc control */
6541
                if (insn & (1 << 13))
6542
                    goto illegal_op;
6543

    
6544
                if (insn & (1 << 26)) {
6545
                    /* Secure monitor call (v6Z) */
6546
                    goto illegal_op; /* not implemented.  */
6547
                } else {
6548
                    op = (insn >> 20) & 7;
6549
                    switch (op) {
6550
                    case 0: /* msr cpsr.  */
6551
                        if (IS_M(env)) {
6552
                            gen_op_v7m_msr_T0(insn & 0xff);
6553
                            gen_movl_reg_T0(s, rn);
6554
                            gen_lookup_tb(s);
6555
                            break;
6556
                        }
6557
                        /* fall through */
6558
                    case 1: /* msr spsr.  */
6559
                        if (IS_M(env))
6560
                            goto illegal_op;
6561
                        gen_movl_T0_reg(s, rn);
6562
                        if (gen_set_psr_T0(s,
6563
                              msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
6564
                              op == 1))
6565
                            goto illegal_op;
6566
                        break;
6567
                    case 2: /* cps, nop-hint.  */
6568
                        if (((insn >> 8) & 7) == 0) {
6569
                            gen_nop_hint(s, insn & 0xff);
6570
                        }
6571
                        /* Implemented as NOP in user mode.  */
6572
                        if (IS_USER(s))
6573
                            break;
6574
                        offset = 0;
6575
                        imm = 0;
6576
                        if (insn & (1 << 10)) {
6577
                            if (insn & (1 << 7))
6578
                                offset |= CPSR_A;
6579
                            if (insn & (1 << 6))
6580
                                offset |= CPSR_I;
6581
                            if (insn & (1 << 5))
6582
                                offset |= CPSR_F;
6583
                            if (insn & (1 << 9))
6584
                                imm = CPSR_A | CPSR_I | CPSR_F;
6585
                        }
6586
                        if (insn & (1 << 8)) {
6587
                            offset |= 0x1f;
6588
                            imm |= (insn & 0x1f);
6589
                        }
6590
                        if (offset) {
6591
                            gen_op_movl_T0_im(imm);
6592
                            gen_set_psr_T0(s, offset, 0);
6593
                        }
6594
                        break;
6595
                    case 3: /* Special control operations.  */
6596
                        op = (insn >> 4) & 0xf;
6597
                        switch (op) {
6598
                        case 2: /* clrex */
6599
                            gen_op_clrex();
6600
                            break;
6601
                        case 4: /* dsb */
6602
                        case 5: /* dmb */
6603
                        case 6: /* isb */
6604
                            /* These execute as NOPs.  */
6605
                            ARCH(7);
6606
                            break;
6607
                        default:
6608
                            goto illegal_op;
6609
                        }
6610
                        break;
6611
                    case 4: /* bxj */
6612
                        /* Trivial implementation equivalent to bx.  */
6613
                        gen_movl_T0_reg(s, rn);
6614
                        gen_bx(s);
6615
                        break;
6616
                    case 5: /* Exception return.  */
6617
                        /* Unpredictable in user mode.  */
6618
                        goto illegal_op;
6619
                    case 6: /* mrs cpsr.  */
6620
                        if (IS_M(env)) {
6621
                            gen_op_v7m_mrs_T0(insn & 0xff);
6622
                        } else {
6623
                            gen_op_movl_T0_cpsr();
6624
                        }
6625
                        gen_movl_reg_T0(s, rd);
6626
                        break;
6627
                    case 7: /* mrs spsr.  */
6628
                        /* Not accessible in user mode.  */
6629
                        if (IS_USER(s) || IS_M(env))
6630
                            goto illegal_op;
6631
                        gen_op_movl_T0_spsr();
6632
                        gen_movl_reg_T0(s, rd);
6633
                        break;
6634
                    }
6635
                }
6636
            } else {
6637
                /* Conditional branch.  */
6638
                op = (insn >> 22) & 0xf;
6639
                /* Generate a conditional jump to next instruction.  */
6640
                s->condlabel = gen_new_label();
6641
                gen_test_cc[op ^ 1](s->condlabel);
6642
                s->condjmp = 1;
6643

    
6644
                /* offset[11:1] = insn[10:0] */
6645
                offset = (insn & 0x7ff) << 1;
6646
                /* offset[17:12] = insn[21:16].  */
6647
                offset |= (insn & 0x003f0000) >> 4;
6648
                /* offset[31:20] = insn[26].  */
6649
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
6650
                /* offset[18] = insn[13].  */
6651
                offset |= (insn & (1 << 13)) << 5;
6652
                /* offset[19] = insn[11].  */
6653
                offset |= (insn & (1 << 11)) << 8;
6654

    
6655
                /* jump to the offset */
6656
                addr = s->pc + offset;
6657
                gen_jmp(s, addr);
6658
            }
6659
        } else {
6660
            /* Data processing immediate.  */
6661
            if (insn & (1 << 25)) {
6662
                if (insn & (1 << 24)) {
6663
                    if (insn & (1 << 20))
6664
                        goto illegal_op;
6665
                    /* Bitfield/Saturate.  */
6666
                    op = (insn >> 21) & 7;
6667
                    imm = insn & 0x1f;
6668
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6669
                    if (rn == 15)
6670
                        gen_op_movl_T1_im(0);
6671
                    else
6672
                        gen_movl_T1_reg(s, rn);
6673
                    switch (op) {
6674
                    case 2: /* Signed bitfield extract.  */
6675
                        imm++;
6676
                        if (shift + imm > 32)
6677
                            goto illegal_op;
6678
                        if (imm < 32)
6679
                            gen_op_sbfx_T1(shift, imm);
6680
                        break;
6681
                    case 6: /* Unsigned bitfield extract.  */
6682
                        imm++;
6683
                        if (shift + imm > 32)
6684
                            goto illegal_op;
6685
                        if (imm < 32)
6686
                            gen_op_ubfx_T1(shift, (1u << imm) - 1);
6687
                        break;
6688
                    case 3: /* Bitfield insert/clear.  */
6689
                        if (imm < shift)
6690
                            goto illegal_op;
6691
                        imm = imm + 1 - shift;
6692
                        if (imm != 32) {
6693
                            gen_movl_T0_reg(s, rd);
6694
                            gen_op_bfi_T1_T0(shift, ((1u << imm) - 1) << shift);
6695
                        }
6696
                        break;
6697
                    case 7:
6698
                        goto illegal_op;
6699
                    default: /* Saturate.  */
6700
                        gen_movl_T1_reg(s, rn);
6701
                        if (shift) {
6702
                            if (op & 1)
6703
                                gen_op_sarl_T1_im(shift);
6704
                            else
6705
                                gen_op_shll_T1_im(shift);
6706
                        }
6707
                        if (op & 4) {
6708
                            /* Unsigned.  */
6709
                            gen_op_ssat_T1(imm);
6710
                            if ((op & 1) && shift == 0)
6711
                                gen_op_usat16_T1(imm);
6712
                            else
6713
                                gen_op_usat_T1(imm);
6714
                        } else {
6715
                            /* Signed.  */
6716
                            gen_op_ssat_T1(imm);
6717
                            if ((op & 1) && shift == 0)
6718
                                gen_op_ssat16_T1(imm);
6719
                            else
6720
                                gen_op_ssat_T1(imm);
6721
                        }
6722
                        break;
6723
                    }
6724
                    gen_movl_reg_T1(s, rd);
6725
                } else {
6726
                    imm = ((insn & 0x04000000) >> 15)
6727
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
6728
                    if (insn & (1 << 22)) {
6729
                        /* 16-bit immediate.  */
6730
                        imm |= (insn >> 4) & 0xf000;
6731
                        if (insn & (1 << 23)) {
6732
                            /* movt */
6733
                            gen_movl_T0_reg(s, rd);
6734
                            gen_op_movtop_T0_im(imm << 16);
6735
                        } else {
6736
                            /* movw */
6737
                            gen_op_movl_T0_im(imm);
6738
                        }
6739
                    } else {
6740
                        /* Add/sub 12-bit immediate.  */
6741
                        if (rn == 15) {
6742
                            addr = s->pc & ~(uint32_t)3;
6743
                            if (insn & (1 << 23))
6744
                                addr -= imm;
6745
                            else
6746
                                addr += imm;
6747
                            gen_op_movl_T0_im(addr);
6748
                        } else {
6749
                            gen_movl_T0_reg(s, rn);
6750
                            gen_op_movl_T1_im(imm);
6751
                            if (insn & (1 << 23))
6752
                                gen_op_subl_T0_T1();
6753
                            else
6754
                                gen_op_addl_T0_T1();
6755
                        }
6756
                    }
6757
                    gen_movl_reg_T0(s, rd);
6758
                }
6759
            } else {
6760
                int shifter_out = 0;
6761
                /* modified 12-bit immediate.  */
6762
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
6763
                imm = (insn & 0xff);
6764
                switch (shift) {
6765
                case 0: /* XY */
6766
                    /* Nothing to do.  */
6767
                    break;
6768
                case 1: /* 00XY00XY */
6769
                    imm |= imm << 16;
6770
                    break;
6771
                case 2: /* XY00XY00 */
6772
                    imm |= imm << 16;
6773
                    imm <<= 8;
6774
                    break;
6775
                case 3: /* XYXYXYXY */
6776
                    imm |= imm << 16;
6777
                    imm |= imm << 8;
6778
                    break;
6779
                default: /* Rotated constant.  */
6780
                    shift = (shift << 1) | (imm >> 7);
6781
                    imm |= 0x80;
6782
                    imm = imm << (32 - shift);
6783
                    shifter_out = 1;
6784
                    break;
6785
                }
6786
                gen_op_movl_T1_im(imm);
6787
                rn = (insn >> 16) & 0xf;
6788
                if (rn == 15)
6789
                    gen_op_movl_T0_im(0);
6790
                else
6791
                    gen_movl_T0_reg(s, rn);
6792
                op = (insn >> 21) & 0xf;
6793
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
6794
                                       shifter_out))
6795
                    goto illegal_op;
6796
                rd = (insn >> 8) & 0xf;
6797
                if (rd != 15) {
6798
                    gen_movl_reg_T0(s, rd);
6799
                }
6800
            }
6801
        }
6802
        break;
6803
    case 12: /* Load/store single data item.  */
6804
        {
6805
        int postinc = 0;
6806
        int writeback = 0;
6807
        if ((insn & 0x01100000) == 0x01000000) {
6808
            if (disas_neon_ls_insn(env, s, insn))
6809
                goto illegal_op;
6810
            break;
6811
        }
6812
        if (rn == 15) {
6813
            /* PC relative.  */
6814
            /* s->pc has already been incremented by 4.  */
6815
            imm = s->pc & 0xfffffffc;
6816
            if (insn & (1 << 23))
6817
                imm += insn & 0xfff;
6818
            else
6819
                imm -= insn & 0xfff;
6820
            gen_op_movl_T1_im(imm);
6821
        } else {
6822
            gen_movl_T1_reg(s, rn);
6823
            if (insn & (1 << 23)) {
6824
                /* Positive offset.  */
6825
                imm = insn & 0xfff;
6826
                gen_op_addl_T1_im(imm);
6827
            } else {
6828
                op = (insn >> 8) & 7;
6829
                imm = insn & 0xff;
6830
                switch (op) {
6831
                case 0: case 8: /* Shifted Register.  */
6832
                    shift = (insn >> 4) & 0xf;
6833
                    if (shift > 3)
6834
                        goto illegal_op;
6835
                    tmp = load_reg(s, rm);
6836
                    if (shift)
6837
                        tcg_gen_shli_i32(tmp, tmp, shift);
6838
                    tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
6839
                    dead_tmp(tmp);
6840
                    break;
6841
                case 4: /* Negative offset.  */
6842
                    gen_op_addl_T1_im(-imm);
6843
                    break;
6844
                case 6: /* User privilege.  */
6845
                    gen_op_addl_T1_im(imm);
6846
                    break;
6847
                case 1: /* Post-decrement.  */
6848
                    imm = -imm;
6849
                    /* Fall through.  */
6850
                case 3: /* Post-increment.  */
6851
                    postinc = 1;
6852
                    writeback = 1;
6853
                    break;
6854
                case 5: /* Pre-decrement.  */
6855
                    imm = -imm;
6856
                    /* Fall through.  */
6857
                case 7: /* Pre-increment.  */
6858
                    gen_op_addl_T1_im(imm);
6859
                    writeback = 1;
6860
                    break;
6861
                default:
6862
                    goto illegal_op;
6863
                }
6864
            }
6865
        }
6866
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
6867
        if (insn & (1 << 20)) {
6868
            /* Load.  */
6869
            if (rs == 15 && op != 2) {
6870
                if (op & 2)
6871
                    goto illegal_op;
6872
                /* Memory hint.  Implemented as NOP.  */
6873
            } else {
6874
                switch (op) {
6875
                case 0: gen_ldst(ldub, s); break;
6876
                case 4: gen_ldst(ldsb, s); break;
6877
                case 1: gen_ldst(lduw, s); break;
6878
                case 5: gen_ldst(ldsw, s); break;
6879
                case 2: gen_ldst(ldl, s); break;
6880
                default: goto illegal_op;
6881
                }
6882
                if (rs == 15) {
6883
                    gen_bx(s);
6884
                } else {
6885
                    gen_movl_reg_T0(s, rs);
6886
                }
6887
            }
6888
        } else {
6889
            /* Store.  */
6890
            if (rs == 15)
6891
                goto illegal_op;
6892
            gen_movl_T0_reg(s, rs);
6893
            switch (op) {
6894
            case 0: gen_ldst(stb, s); break;
6895
            case 1: gen_ldst(stw, s); break;
6896
            case 2: gen_ldst(stl, s); break;
6897
            default: goto illegal_op;
6898
            }
6899
        }
6900
        if (postinc)
6901
            gen_op_addl_T1_im(imm);
6902
        if (writeback)
6903
            gen_movl_reg_T1(s, rn);
6904
        }
6905
        break;
6906
    default:
6907
        goto illegal_op;
6908
    }
6909
    return 0;
6910
illegal_op:
6911
    return 1;
6912
}
6913

    
6914
static void disas_thumb_insn(CPUState *env, DisasContext *s)
6915
{
6916
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
6917
    int32_t offset;
6918
    int i;
6919
    TCGv tmp;
6920

    
6921
    if (s->condexec_mask) {
6922
        cond = s->condexec_cond;
6923
        s->condlabel = gen_new_label();
6924
        gen_test_cc[cond ^ 1](s->condlabel);
6925
        s->condjmp = 1;
6926
    }
6927

    
6928
    insn = lduw_code(s->pc);
6929
    s->pc += 2;
6930

    
6931
    switch (insn >> 12) {
6932
    case 0: case 1:
6933
        rd = insn & 7;
6934
        op = (insn >> 11) & 3;
6935
        if (op == 3) {
6936
            /* add/subtract */
6937
            rn = (insn >> 3) & 7;
6938
            gen_movl_T0_reg(s, rn);
6939
            if (insn & (1 << 10)) {
6940
                /* immediate */
6941
                gen_op_movl_T1_im((insn >> 6) & 7);
6942
            } else {
6943
                /* reg */
6944
                rm = (insn >> 6) & 7;
6945
                gen_movl_T1_reg(s, rm);
6946
            }
6947
            if (insn & (1 << 9)) {
6948
                if (s->condexec_mask)
6949
                    gen_op_subl_T0_T1();
6950
                else
6951
                    gen_op_subl_T0_T1_cc();
6952
            } else {
6953
                if (s->condexec_mask)
6954
                    gen_op_addl_T0_T1();
6955
                else
6956
                    gen_op_addl_T0_T1_cc();
6957
            }
6958
            gen_movl_reg_T0(s, rd);
6959
        } else {
6960
            /* shift immediate */
6961
            rm = (insn >> 3) & 7;
6962
            shift = (insn >> 6) & 0x1f;
6963
            gen_movl_T0_reg(s, rm);
6964
            if (s->condexec_mask)
6965
                gen_shift_T0_im_thumb[op](shift);
6966
            else
6967
                gen_shift_T0_im_thumb_cc[op](shift);
6968
            gen_movl_reg_T0(s, rd);
6969
        }
6970
        break;
6971
    case 2: case 3:
6972
        /* arithmetic large immediate */
6973
        op = (insn >> 11) & 3;
6974
        rd = (insn >> 8) & 0x7;
6975
        if (op == 0) {
6976
            gen_op_movl_T0_im(insn & 0xff);
6977
        } else {
6978
            gen_movl_T0_reg(s, rd);
6979
            gen_op_movl_T1_im(insn & 0xff);
6980
        }
6981
        switch (op) {
6982
        case 0: /* mov */
6983
            if (!s->condexec_mask)
6984
                gen_op_logic_T0_cc();
6985
            break;
6986
        case 1: /* cmp */
6987
            gen_op_subl_T0_T1_cc();
6988
            break;
6989
        case 2: /* add */
6990
            if (s->condexec_mask)
6991
                gen_op_addl_T0_T1();
6992
            else
6993
                gen_op_addl_T0_T1_cc();
6994
            break;
6995
        case 3: /* sub */
6996
            if (s->condexec_mask)
6997
                gen_op_subl_T0_T1();
6998
            else
6999
                gen_op_subl_T0_T1_cc();
7000
            break;
7001
        }
7002
        if (op != 1)
7003
            gen_movl_reg_T0(s, rd);
7004
        break;
7005
    case 4:
7006
        if (insn & (1 << 11)) {
7007
            rd = (insn >> 8) & 7;
7008
            /* load pc-relative.  Bit 1 of PC is ignored.  */
7009
            val = s->pc + 2 + ((insn & 0xff) * 4);
7010
            val &= ~(uint32_t)2;
7011
            gen_op_movl_T1_im(val);
7012
            gen_ldst(ldl, s);
7013
            gen_movl_reg_T0(s, rd);
7014
            break;
7015
        }
7016
        if (insn & (1 << 10)) {
7017
            /* data processing extended or blx */
7018
            rd = (insn & 7) | ((insn >> 4) & 8);
7019
            rm = (insn >> 3) & 0xf;
7020
            op = (insn >> 8) & 3;
7021
            switch (op) {
7022
            case 0: /* add */
7023
                gen_movl_T0_reg(s, rd);
7024
                gen_movl_T1_reg(s, rm);
7025
                gen_op_addl_T0_T1();
7026
                gen_movl_reg_T0(s, rd);
7027
                break;
7028
            case 1: /* cmp */
7029
                gen_movl_T0_reg(s, rd);
7030
                gen_movl_T1_reg(s, rm);
7031
                gen_op_subl_T0_T1_cc();
7032
                break;
7033
            case 2: /* mov/cpy */
7034
                gen_movl_T0_reg(s, rm);
7035
                gen_movl_reg_T0(s, rd);
7036
                break;
7037
            case 3:/* branch [and link] exchange thumb register */
7038
                if (insn & (1 << 7)) {
7039
                    val = (uint32_t)s->pc | 1;
7040
                    gen_op_movl_T1_im(val);
7041
                    gen_movl_reg_T1(s, 14);
7042
                }
7043
                gen_movl_T0_reg(s, rm);
7044
                gen_bx(s);
7045
                break;
7046
            }
7047
            break;
7048
        }
7049

    
7050
        /* data processing register */
7051
        rd = insn & 7;
7052
        rm = (insn >> 3) & 7;
7053
        op = (insn >> 6) & 0xf;
7054
        if (op == 2 || op == 3 || op == 4 || op == 7) {
7055
            /* the shift/rotate ops want the operands backwards */
7056
            val = rm;
7057
            rm = rd;
7058
            rd = val;
7059
            val = 1;
7060
        } else {
7061
            val = 0;
7062
        }
7063

    
7064
        if (op == 9) /* neg */
7065
            gen_op_movl_T0_im(0);
7066
        else if (op != 0xf) /* mvn doesn't read its first operand */
7067
            gen_movl_T0_reg(s, rd);
7068

    
7069
        gen_movl_T1_reg(s, rm);
7070
        switch (op) {
7071
        case 0x0: /* and */
7072
            gen_op_andl_T0_T1();
7073
            if (!s->condexec_mask)
7074
                gen_op_logic_T0_cc();
7075
            break;
7076
        case 0x1: /* eor */
7077
            gen_op_xorl_T0_T1();
7078
            if (!s->condexec_mask)
7079
                gen_op_logic_T0_cc();
7080
            break;
7081
        case 0x2: /* lsl */
7082
            if (s->condexec_mask) {
7083
                gen_op_shll_T1_T0();
7084
            } else {
7085
                gen_op_shll_T1_T0_cc();
7086
                gen_op_logic_T1_cc();
7087
            }
7088
            break;
7089
        case 0x3: /* lsr */
7090
            if (s->condexec_mask) {
7091
                gen_op_shrl_T1_T0();
7092
            } else {
7093
                gen_op_shrl_T1_T0_cc();
7094
                gen_op_logic_T1_cc();
7095
            }
7096
            break;
7097
        case 0x4: /* asr */
7098
            if (s->condexec_mask) {
7099
                gen_op_sarl_T1_T0();
7100
            } else {
7101
                gen_op_sarl_T1_T0_cc();
7102
                gen_op_logic_T1_cc();
7103
            }
7104
            break;
7105
        case 0x5: /* adc */
7106
            if (s->condexec_mask)
7107
                gen_adc_T0_T1();
7108
            else
7109
                gen_op_adcl_T0_T1_cc();
7110
            break;
7111
        case 0x6: /* sbc */
7112
            if (s->condexec_mask)
7113
                gen_op_sbcl_T0_T1();
7114
            else
7115
                gen_op_sbcl_T0_T1_cc();
7116
            break;
7117
        case 0x7: /* ror */
7118
            if (s->condexec_mask) {
7119
                gen_op_rorl_T1_T0();
7120
            } else {
7121
                gen_op_rorl_T1_T0_cc();
7122
                gen_op_logic_T1_cc();
7123
            }
7124
            break;
7125
        case 0x8: /* tst */
7126
            gen_op_andl_T0_T1();
7127
            gen_op_logic_T0_cc();
7128
            rd = 16;
7129
            break;
7130
        case 0x9: /* neg */
7131
            if (s->condexec_mask)
7132
                gen_op_subl_T0_T1();
7133
            else
7134
                gen_op_subl_T0_T1_cc();
7135
            break;
7136
        case 0xa: /* cmp */
7137
            gen_op_subl_T0_T1_cc();
7138
            rd = 16;
7139
            break;
7140
        case 0xb: /* cmn */
7141
            gen_op_addl_T0_T1_cc();
7142
            rd = 16;
7143
            break;
7144
        case 0xc: /* orr */
7145
            gen_op_orl_T0_T1();
7146
            if (!s->condexec_mask)
7147
                gen_op_logic_T0_cc();
7148
            break;
7149
        case 0xd: /* mul */
7150
            gen_op_mull_T0_T1();
7151
            if (!s->condexec_mask)
7152
                gen_op_logic_T0_cc();
7153
            break;
7154
        case 0xe: /* bic */
7155
            gen_op_bicl_T0_T1();
7156
            if (!s->condexec_mask)
7157
                gen_op_logic_T0_cc();
7158
            break;
7159
        case 0xf: /* mvn */
7160
            gen_op_notl_T1();
7161
            if (!s->condexec_mask)
7162
                gen_op_logic_T1_cc();
7163
            val = 1;
7164
            rm = rd;
7165
            break;
7166
        }
7167
        if (rd != 16) {
7168
            if (val)
7169
                gen_movl_reg_T1(s, rm);
7170
            else
7171
                gen_movl_reg_T0(s, rd);
7172
        }
7173
        break;
7174

    
7175
    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        gen_movl_T1_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
        dead_tmp(tmp);

        if (op < 3) /* store */
            gen_movl_T0_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_ldst(stl, s);
            break;
        case 1: /* strh */
            gen_ldst(stw, s);
            break;
        case 2: /* strb */
            gen_ldst(stb, s);
            break;
        case 3: /* ldrsb */
            gen_ldst(ldsb, s);
            break;
        case 4: /* ldr */
            gen_ldst(ldl, s);
            break;
        case 5: /* ldrh */
            gen_ldst(lduw, s);
            break;
        case 6: /* ldrb */
            gen_ldst(ldub, s);
            break;
        case 7: /* ldrsh */
            gen_ldst(ldsw, s);
            break;
        }
        if (op >= 3) /* load */
            gen_movl_reg_T0(s, rd);
        break;

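    /* Cases 6-9 are immediate-offset loads and stores.  The 5-bit immediate
       in bits [10:6] is scaled by the access size (4 for words, 1 for bytes,
       2 for halfwords); case 9 uses an 8-bit SP-relative offset scaled by 4.
       Bit 11 selects load rather than store.  */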
    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        gen_movl_T1_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val);

        if (insn & (1 << 11)) {
            /* load */
            gen_ldst(ldl, s);
            gen_movl_reg_T0(s, rd);
        } else {
            /* store */
            gen_movl_T0_reg(s, rd);
            gen_ldst(stl, s);
        }
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        gen_movl_T1_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val);

        if (insn & (1 << 11)) {
            /* load */
            gen_ldst(ldub, s);
            gen_movl_reg_T0(s, rd);
        } else {
            /* store */
            gen_movl_T0_reg(s, rd);
            gen_ldst(stb, s);
        }
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        gen_movl_T1_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val);

        if (insn & (1 << 11)) {
            /* load */
            gen_ldst(lduw, s);
            gen_movl_reg_T0(s, rd);
        } else {
            /* store */
            gen_movl_T0_reg(s, rd);
            gen_ldst(stw, s);
        }
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        gen_movl_T1_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val);

        if (insn & (1 << 11)) {
            /* load */
            gen_ldst(ldl, s);
            gen_movl_reg_T0(s, rd);
        } else {
            /* store */
            gen_movl_T0_reg(s, rd);
            gen_ldst(stl, s);
        }
        break;

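    /* ADD to high register: bit 11 selects SP or the word-aligned PC as the
       base; the 8-bit immediate is scaled by 4.  */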
    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            gen_movl_T0_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        gen_op_movl_T1_im(val);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
        break;

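    /* Miscellaneous instructions: bits [11:8] select between SP adjustment,
       sign/zero extension, push/pop, compare-and-branch, IT and hints,
       bkpt, byte reversal and cps.  */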
    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
              val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;

        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            gen_movl_T1_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(cpu_T[1]); break;
            case 1: gen_sxtb(cpu_T[1]); break;
            case 2: gen_uxth(cpu_T[1]); break;
            case 3: gen_uxtb(cpu_T[1]); break;
            }
            gen_movl_reg_T1(s, rd);
            break;
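        /* PUSH/POP: the transfer size is computed up front from the register
           list (plus one word when LR or PC is included), the address is
           pre-decremented for pushes, and the registers are then transferred
           in ascending order before the new SP is written back.  */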
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            gen_movl_T1_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                gen_op_addl_T1_im(-offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        gen_ldst(ldl, s);
                        gen_movl_reg_T0(s, i);
                    } else {
                        /* push */
                        gen_movl_T0_reg(s, i);
                        gen_ldst(stl, s);
                    }
                    /* advance to the next address.  */
                    gen_op_addl_T1_im(4);
                }
            }
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    gen_ldst(ldl, s);
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    gen_movl_T0_reg(s, 14);
                    gen_ldst(stl, s);
                }
                gen_op_addl_T1_im(4);
            }
            if ((insn & (1 << 11)) == 0) {
                gen_op_addl_T1_im(-offset);
            }
            /* write back the new stack pointer */
            gen_movl_reg_T1(s, 13);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900)
                gen_bx(s);
            break;

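        /* CBZ/CBNZ: compare a low register against zero and branch forward
           by a 6-bit immediate (bit 9 and bits [7:3]) scaled by 2; bit 11
           selects the branch-on-nonzero form.  */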
        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            gen_movl_T0_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                gen_op_testn_T0(s->condlabel);
            else
                gen_op_test_T0(s->condlabel);

            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

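        /* IT and NOP-compatible hints share this encoding: a zero mask is a
           hint, otherwise the condition and mask are stored in the
           DisasContext and applied to the following instructions.  */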
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;

        case 0xe: /* bkpt */
            gen_set_condexec(s);
            gen_op_movl_T0_im((long)s->pc - 2);
            gen_set_pc_T0();
            gen_op_bkpt();
            s->is_jmp = DISAS_JUMP;
            break;

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            gen_movl_T0_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: gen_op_rev_T0(); break;
            case 1: gen_op_rev16_T0(); break;
            case 3: gen_op_revsh_T0(); break;
            default: goto illegal_op;
            }
            gen_movl_reg_T0(s, rd);
            break;

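        /* CPS: ignored in user mode.  On M-profile cores it sets or clears
           PRIMASK/FAULTMASK through the v7M system register helpers; on
           other cores it masks or unmasks the CPSR A/I/F bits.  */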
        case 6: /* cps */
            ARCH(6);
            if (IS_USER(s))
                break;
            if (IS_M(env)) {
                val = (insn & (1 << 4)) != 0;
                gen_op_movl_T0_im(val);
                /* PRIMASK */
                if (insn & 1)
                    gen_op_v7m_msr_T0(16);
                /* FAULTMASK */
                if (insn & 2)
                    gen_op_v7m_msr_T0(17);

                gen_lookup_tb(s);
            } else {
                if (insn & (1 << 4))
                    shift = CPSR_A | CPSR_I | CPSR_F;
                else
                    shift = 0;

                val = ((insn & 7) << 6) & shift;
                gen_op_movl_T0_im(val);
                gen_set_psr_T0(s, shift, 0);
            }
            break;

        default:
            goto undef;
        }
        break;

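    /* LDMIA/STMIA: transfer the registers named in the low-8 register list,
       incrementing the base after each word; the base register is only
       written back when it is not in the list.  */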
    case 12:
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        gen_movl_T1_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    gen_ldst(ldl, s);
                    gen_movl_reg_T0(s, i);
                } else {
                    /* store */
                    gen_movl_T0_reg(s, i);
                    gen_ldst(stl, s);
                }
                /* advance to the next address */
                gen_op_addl_T1_im(4);
            }
        }
        /* Base register writeback.  */
        if ((insn & (1 << rn)) == 0)
            gen_movl_reg_T1(s, rn);
        break;

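    /* Conditional branch or SWI: condition 0xe is undefined, 0xf encodes
       SWI, and the remaining conditions branch by an 8-bit signed offset
       scaled by 2.  */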
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_condexec(s);
            gen_op_movl_T0_im((long)s->pc | 1);
            /* Don't set r15.  */
            gen_set_pc_T0();
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc[cond ^ 1](s->condlabel);
        s->condjmp = 1;
        gen_movl_T1_reg(s, 15);

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

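    /* Case 14 is either an unconditional branch with an 11-bit signed offset
       scaled by 2, or (when bit 11 is set) the first half of a 32-bit
       Thumb-2 instruction handed to disas_thumb2_insn.  */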
    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
              goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
          goto undef32;
        break;
    }
    return;
undef32:
    gen_set_condexec(s);
    gen_op_movl_T0_im((long)s->pc - 4);
    gen_set_pc_T0();
    gen_op_undef_insn();
    s->is_jmp = DISAS_JUMP;
    return;
illegal_op:
undef:
    gen_set_condexec(s);
    gen_op_movl_T0_im((long)s->pc - 2);
    gen_set_pc_T0();
    gen_op_undef_insn();
    s->is_jmp = DISAS_JUMP;
}

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline int gen_intermediate_code_internal(CPUState *env,
                                                 TranslationBlock *tb,
                                                 int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;

    /* generate intermediate code */
    num_temps = 0;
    memset(temps, 0, sizeof(temps));

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = env->thumb;
    dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
    dc->condexec_cond = env->condexec_bits >> 4;
    dc->is_mem = 0;
#if !defined(CONFIG_USER_ONLY)
    if (IS_M(env)) {
        dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
    } else {
        dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
    }
#endif
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (env->condexec_bits)
      gen_op_set_condexec(0);
    do {
#ifndef CONFIG_USER_ONLY
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_op_exception_exit();
        }
#endif

        if (env->nb_breakpoints > 0) {
            for(j = 0; j < env->nb_breakpoints; j++) {
                if (env->breakpoints[j] == dc->pc) {
                    gen_set_condexec(dc);
                    gen_op_movl_T0_im((long)dc->pc);
                    gen_set_pc_T0();
                    gen_op_debug();
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
        }

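        /* Decode one instruction.  For Thumb, advance the IT-block state
           afterwards: the mask shifts left one bit per instruction, its top
           bit supplying the low bit of the condition, and the condition is
           cleared once the mask is exhausted.  */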
        if (env->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Terminate the TB on memory ops if watchpoints are present.  */
        /* FIXME: This should be replaced by the deterministic execution
         * IRQ raising bits.  */
        if (dc->is_mem && env->nb_watchpoints)
            break;

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             dc->pc < next_page_start);

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (__builtin_expect(env->singlestep_enabled, 0)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_op_swi();
            } else {
                gen_op_debug();
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_op_movl_T0_im((long)dc->pc);
            gen_set_pc_T0();
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_op_swi();
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_op_debug();
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_op_wfi();
            break;
        case DISAS_SWI:
            gen_op_swi();
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }
done_generating:
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (loglevel & CPU_LOG_TB_IN_ASM) {
        fprintf(logfile, "----------------\n");
        fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
        target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
        fprintf(logfile, "\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
    }
    return 0;
}

int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    return gen_intermediate_code_internal(env, tb, 0);
}

int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    return gen_intermediate_code_internal(env, tb, 1);
}

static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int i;
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
    uint32_t psr;

    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
}