Statistics
| Branch: | Revision:

root / tcg / arm / tcg-target.c @ 24e838b7

History | View | Annotate | Download (59 kB)

1
/*
2
 * Tiny Code Generator for QEMU
3
 *
4
 * Copyright (c) 2008 Andrzej Zaborowski
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7
 * of this software and associated documentation files (the "Software"), to deal
8
 * in the Software without restriction, including without limitation the rights
9
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
 * copies of the Software, and to permit persons to whom the Software is
11
 * furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22
 * THE SOFTWARE.
23
 */
24

    
25
/* Detect the host ARM architecture revision from the compiler's
   predefined __ARM_ARCH_* macros.  Each newer level implies the
   previous one (v7 => v6 => v5). */
#if defined(__ARM_ARCH_7__) ||  \
    defined(__ARM_ARCH_7A__) || \
    defined(__ARM_ARCH_7EM__) || \
    defined(__ARM_ARCH_7M__) || \
    defined(__ARM_ARCH_7R__)
#define USE_ARMV7_INSTRUCTIONS
#endif

#if defined(USE_ARMV7_INSTRUCTIONS) || \
    defined(__ARM_ARCH_6J__) || \
    defined(__ARM_ARCH_6K__) || \
    defined(__ARM_ARCH_6T2__) || \
    defined(__ARM_ARCH_6Z__) || \
    defined(__ARM_ARCH_6ZK__)
#define USE_ARMV6_INSTRUCTIONS
#endif

#if defined(USE_ARMV6_INSTRUCTIONS) || \
    defined(__ARM_ARCH_5T__) || \
    defined(__ARM_ARCH_5TE__) || \
    defined(__ARM_ARCH_5TEJ__)
#define USE_ARMV5_INSTRUCTIONS
#endif

/* Re-expose the detected levels as ordinary constants so the code
   generator below can use plain if() instead of #ifdef; the compiler
   folds the dead branches away.  The macros are #undef'd so only the
   constants are used from here on. */
#ifdef USE_ARMV5_INSTRUCTIONS
static const int use_armv5_instructions = 1;
#else
static const int use_armv5_instructions = 0;
#endif
#undef USE_ARMV5_INSTRUCTIONS

#ifdef USE_ARMV6_INSTRUCTIONS
static const int use_armv6_instructions = 1;
#else
static const int use_armv6_instructions = 0;
#endif
#undef USE_ARMV6_INSTRUCTIONS

#ifdef USE_ARMV7_INSTRUCTIONS
static const int use_armv7_instructions = 1;
#else
static const int use_armv7_instructions = 0;
#endif
#undef USE_ARMV7_INSTRUCTIONS
69

    
70
#ifndef NDEBUG
/* Register names for debug dumps only; index matches TCG_REG_R0..R15.
   r15 is shown as %pc. */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",
    "%r1",
    "%r2",
    "%r3",
    "%r4",
    "%r5",
    "%r6",
    "%r7",
    "%r8",
    "%r9",
    "%r10",
    "%r11",
    "%r12",
    "%r13",
    "%r14",
    "%pc",
};
#endif
90

    
91
/* Register allocation preference order: call-saved registers first
   (r4-r11, r13) so allocated values survive helper calls; the
   call-clobbered r0-r3, r12 and lr come last.  r15 (pc) and r8
   (used as a scratch register throughout this backend) are never
   allocated. */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,
};

/* AAPCS: first four word arguments in r0-r3; 64-bit results in r0:r1. */
static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};
115

    
116
/* Patch an R_ARM_ABS32 relocation: store the absolute target address
   directly over the 32-bit slot at code_ptr. */
static inline void reloc_abs32(void *code_ptr, tcg_target_long target)
{
    uint32_t *slot = code_ptr;

    *slot = target;
}
120

    
121
/* Patch an R_ARM_PC24 relocation: rewrite the 24-bit signed word
   offset of the B/BL instruction at code_ptr to reach 'target'.
   The +8 accounts for the ARM pipeline (PC reads as insn + 8). */
static inline void reloc_pc24(void *code_ptr, tcg_target_long target)
{
    uint32_t offset = ((target - ((tcg_target_long) code_ptr + 8)) >> 2);

    /* Keep the condition/opcode bits, replace only the offset field. */
    *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
                             | (offset & 0xffffff);
}
128

    
129
/* Resolve a pending relocation of the given ELF type at code_ptr.
   Only R_ARM_ABS32 and R_ARM_PC24 are emitted by this backend;
   anything else aborts.  Note the default label sits *before* the
   R_ARM_PC24 case: tcg_abort() does not return, so there is no
   fall-through. */
static void patch_reloc(uint8_t *code_ptr, int type,
                tcg_target_long value, tcg_target_long addend)
{
    switch (type) {
    case R_ARM_ABS32:
        reloc_abs32(code_ptr, value);
        break;

    case R_ARM_CALL:
    case R_ARM_JUMP24:
    default:
        tcg_abort();

    case R_ARM_PC24:
        reloc_pc24(code_ptr, value);
        break;
    }
}
147

    
148
/* maximum number of register used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    /* AAPCS passes the first four word-sized arguments in r0-r3;
       'flags' is unused on this target. */
    return 4;
}
153

    
154
/* parse target specific constraints */
/* Consume one constraint letter from *pct_str and fill in 'ct'.
   Returns 0 on success, -1 on an unknown letter.  Letters:
     'I' - constant encodable as an ARM ALU immediate
     'r' - any register
     'l' - qemu_ld address register
     'L' - qemu_ld second address register
     's' - qemu_st address & data register
     'S' - qemu_st64 data_reg2
   The softmmu variants exclude registers that the TLB-lookup /
   byte-swap code sequences clobber before the value is consumed. */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'I':
         ct->ct |= TCG_CT_CONST_ARM;
         break;

    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        break;

    /* qemu_ld address */
    case 'l':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r0 and r1 will be overwritten when reading the tlb entry,
           so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#endif
        break;
    case 'L':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r1 is still needed to load data_reg or data_reg2,
           so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#endif
        break;

    /* qemu_st address & data_reg */
    case 's':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 will be overwritten when reading the tlb entry
           (softmmu only) and doing the byte swapping, so don't
           use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;
    /* qemu_st64 data_reg2 */
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 will be overwritten when reading the tlb entry
            (softmmu only) and doing the byte swapping, so don't
            use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#ifdef CONFIG_SOFTMMU
        /* r2 is still needed to load data_reg, so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#endif
        break;

    default:
        return -1;
    }
    /* Advance the caller's cursor past the consumed letter. */
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
224

    
225
/* Rotate 'val' left by 'n' bits (0 <= n < 32).
   The right-shift count is masked so that n == 0 is well defined:
   the original (val >> (32 - n)) is undefined behavior in C when
   n == 0 (shift by the full width).  Callers in this file only pass
   2/4/6, for which the result is unchanged. */
static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> ((32 - n) & 31));
}
229

    
230
/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   right-rotated by an even amount between 0 and 30. */
/* Return the left-rotation amount that brings 'imm' into the low
   8 bits (i.e. 32 minus the encoding's right-rotation), or -1 if
   'imm' cannot be represented as an ARM modified immediate. */
static inline int encode_imm(uint32_t imm)
{
    int low_shift;
    int rot;

    /* Already fits in the low byte: no rotation needed. */
    if (!(imm & ~0xffu)) {
        return 0;
    }
    /* Shift the value down by an even amount and see if it fits. */
    low_shift = ctz32(imm) & ~1;
    if (!((imm >> low_shift) & ~0xffu)) {
        return 32 - low_shift;
    }
    /* Remaining encodable values wrap around the word boundary:
       try the small even left-rotations. */
    for (rot = 2; rot <= 6; rot += 2) {
        if (!(rotl(imm, rot) & ~0xffu)) {
            return rot;
        }
    }
    /* imm can't be encoded */
    return -1;
}
253

    
254
/* Nonzero iff 'imm' is representable as an ARM ALU immediate. */
static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) != -1;
}
258

    
259
/* Test if a constant matches the constraint.
260
 * TODO: define constraints for:
261
 *
262
 * ldr/str offset:   between -0xfff and 0xfff
263
 * ldrh/strh offset: between -0xff and 0xff
264
 * mov operand2:     values represented with x << (2 * y), x < 0x100
265
 * add, sub, eor...: ditto
266
 */
267
static inline int tcg_target_const_match(tcg_target_long val,
268
                const TCGArgConstraint *arg_ct)
269
{
270
    int ct;
271
    ct = arg_ct->ct;
272
    if (ct & TCG_CT_CONST)
273
        return 1;
274
    else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
275
        return 1;
276
    else
277
        return 0;
278
}
279

    
280
/* ARM data-processing opcodes (bits [24:21] of the instruction). */
enum arm_data_opc_e {
    ARITH_AND = 0x0,
    ARITH_EOR = 0x1,
    ARITH_SUB = 0x2,
    ARITH_RSB = 0x3,
    ARITH_ADD = 0x4,
    ARITH_ADC = 0x5,
    ARITH_SBC = 0x6,
    ARITH_RSC = 0x7,
    ARITH_TST = 0x8,
    ARITH_CMP = 0xa,
    ARITH_CMN = 0xb,
    ARITH_ORR = 0xc,
    ARITH_MOV = 0xd,
    ARITH_BIC = 0xe,
    ARITH_MVN = 0xf,
};

/* S bit (bit 20): the compare/test opcodes only exist in the
   flag-setting form, so force it for them. */
#define TO_CPSR(opc) \
  ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)

/* Operand-2 shifter encodings: immediate shift amount or shift by
   register 'rs'. */
#define SHIFT_IMM_LSL(im)        (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)        (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)        (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)        (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)        (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)        (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)        (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)        (((rs) << 8) | 0x70)

/* ARM condition codes (instruction bits [31:28]). */
enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,        /* Unsigned greater or equal */
    COND_CC = 0x3,        /* Unsigned less than */
    COND_MI = 0x4,        /* Negative */
    COND_PL = 0x5,        /* Zero or greater */
    COND_VS = 0x6,        /* Overflow */
    COND_VC = 0x7,        /* No overflow */
    COND_HI = 0x8,        /* Unsigned greater than */
    COND_LS = 0x9,        /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};

/* Map TCG comparison conditions to ARM condition codes. */
static const uint8_t tcg_cond_to_arm_cond[10] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};
341

    
342
/* Emit BX<cond> rn: branch (and possibly interwork) to the address
   in register rn. */
static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}
346

    
347
/* Emit B<cond> with a byte offset relative to the current insn;
   -8 compensates for PC reading as insn + 8. */
static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
352

    
353
/* Emit a B<cond> whose 24-bit offset field will be filled in later
   by reloc_pc24().  Only the opcode byte is written. */
static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
{
    /* We pay attention here to not modify the branch target by skipping
       the corresponding bytes. This ensure that caches and memory are
       kept coherent during retranslation. */
#ifdef HOST_WORDS_BIGENDIAN
    tcg_out8(s, (cond << 4) | 0x0a);
    s->code_ptr += 3;
#else
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0a);
#endif
}
366

    
367
/* Emit BL<cond> (branch with link) with a byte offset relative to
   the current insn; -8 compensates for the pipeline. */
static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
372

    
373
/* Emit BLX<cond> rn: call through a register (ARMv5+). */
static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}
377

    
378
/* Emit immediate-form BLX (unconditional, switches to Thumb).
   Bit 1 of the offset goes into the H bit (bit 24). */
static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                (((offset - 8) >> 2) & 0x00ffffff));
}
383

    
384
/* Emit a register-form data-processing insn:
   <opc><cond> rd, rn, rm, <shift>.  TO_CPSR sets the S bit for the
   compare/test opcodes. */
static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | shift | rm);
}
390

    
391
/* Emit a 64-bit two-instruction sequence: opc0 computes the low word
   with the S bit set (1 << 20) so its carry feeds opc1 for the high
   word (e.g. ADDS/ADC, SUBS/SBC).  If rd0 aliases an input of the
   second insn, the low result is first computed into r8 (the literal
   8 << 12) and moved into rd0 afterwards. */
static inline void tcg_out_dat_reg2(TCGContext *s,
                int cond, int opc0, int opc1, int rd0, int rd1,
                int rn0, int rn1, int rm0, int rm1, int shift)
{
    if (rd0 == rn1 || rd0 == rm1) {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (8 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    } else {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (rd0 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
    }
}
409

    
410
/* Emit an immediate-form data-processing insn (bit 25 set).
   'im' must already be a valid 12-bit rotate+value encoding. */
static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | im);
}
416

    
417
/* Load a 32-bit constant into rd.  Strategy:
   - small negative values ([-0x100, -1]): a single MVN;
   - ARMv7: MOVW, plus MOVT if any high half-word bits are set;
   - otherwise: a MOV of the lowest 8-bit rotated chunk followed by
     ORRs for each remaining chunk. */
static inline void tcg_out_movi32(TCGContext *s,
                int cond, int rd, uint32_t arg)
{
    /* TODO: This is very suboptimal, we can easily have a constant
     * pool somewhere after all the instructions.  */
    if ((int)arg < 0 && (int)arg >= -0x100) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);
    } else if (use_armv7_instructions) {
        /* use movw/movt */
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
    } else {
        int opc = ARITH_MOV;
        int rn = 0;

        do {
            int i, rot;

            /* Peel off the lowest even-aligned 8-bit chunk and emit it
               with the matching rotate-right encoding (32 - i). */
            i = ctz32(arg) & ~1;
            rot = ((32 - i) << 7) & 0xf00;
            tcg_out_dat_imm(s, cond, opc, rd, rn, ((arg >> i) & 0xff) | rot);
            arg &= ~(0xff << i);

            /* Subsequent chunks are ORed into the partial result. */
            opc = ARITH_ORR;
            rn = rd;
        } while (arg);
    }
}
451

    
452
/* Emit MUL<cond> rd, rs, rm.  The ARM MUL encoding forbids rd == rm,
   so swap the commutative operands when possible; if rd equals both,
   multiply into r8 (the literal 8 << 16) and move the result. */
static inline void tcg_out_mul32(TCGContext *s,
                int cond, int rd, int rs, int rm)
{
    if (rd != rm)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
    else if (rd != rs)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rm << 8) | 0x90 | rs);
    else {
        tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    }
}
468

    
469
/* Emit UMULL<cond> rd0 (lo), rd1 (hi), rs, rm.  The encoding forbids
   the destinations overlapping rm; swap operands, or copy rm into r8
   first (the trailing 0x...98 uses r8 as the multiplicand). */
static inline void tcg_out_umull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0x800098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}
485

    
486
/* Emit SMULL<cond> rd0 (lo), rd1 (hi), rs, rm.  Same operand-overlap
   workarounds as tcg_out_umull32. */
static inline void tcg_out_smull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0xc00098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}
502

    
503
/* Sign-extend the low byte of rn into rd: SXTB on ARMv6+, otherwise
   LSL #24 then ASR #24. */
static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}
516

    
517
/* Zero-extend the low byte of rn into rd (AND with 0xff). */
static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}
522

    
523
/* Sign-extend the low half-word of rn into rd: SXTH on ARMv6+,
   otherwise LSL #16 then ASR #16. */
static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}
536

    
537
/* Zero-extend the low half-word of rn into rd: UXTH on ARMv6+,
   otherwise LSL #16 then LSR #16. */
static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}
550

    
551
/* Byte-swap the low half-word of rn and sign-extend into rd:
   REVSH on ARMv6+, otherwise a 3-insn shift/or sequence via r8. */
static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
    }
}
565

    
566
/* Byte-swap the low half-word of rn into rd (zero/garbage in high
   bits per caller's contract): REV16 on ARMv6+, otherwise shifts/or
   via r8. */
static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
    }
}
580

    
581
/* Byte-swap all 32 bits of rn into rd: REV on ARMv6+, otherwise the
   classic 4-insn EOR/BIC/ROR sequence using r8 as scratch. */
static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_R8, rn, rn, SHIFT_IMM_ROR(16));
        /* 0xff | 0x800 encodes the immediate 0x00ff0000. */
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_R8, TCG_REG_R8, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_R8, SHIFT_IMM_LSR(8));
    }
}
597

    
598
/* Word load/store emitters.  The *_12 forms take a signed immediate
   offset with magnitude <= 0xfff (LDR/STR immediate, U bit selects
   add vs subtract); the *_r forms use a register offset. */

static inline void tcg_out_ld32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05900000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05100000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_st32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05800000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05000000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07900000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07800000 |
                    (rn << 16) | (rd << 12) | rm);
}

/* Register pre-increment with base writeback.  */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07b00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07a00000 |
                    (rn << 16) | (rd << 12) | rm);
}
648

    
649
/* Half-word load/store emitters (LDRH/STRH/LDRSH).  The *_8 forms
   take a signed immediate with magnitude <= 0xff, split into two
   4-bit fields of the misc load/store encoding; the *_r forms use a
   register offset. */

static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_st16_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st16_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000f0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000f0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000f0 |
                    (rn << 16) | (rd << 12) | rm);
}
708

    
709
/* Byte load/store emitters (LDRB/STRB/LDRSB).  Unsigned byte forms
   use the 12-bit immediate encoding like word accesses; the signed
   byte forms use the misc encoding with an 8-bit split immediate. */

static inline void tcg_out_ld8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05d00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05500000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_st8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05c00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05400000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07d00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07c00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000d0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000d0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000d0 |
                    (rn << 16) | (rd << 12) | rm);
}
764

    
765
/* General load/store wrappers: if the offset fits the instruction's
   immediate field (12-bit for word/unsigned-byte, 8-bit for
   half-word/signed-byte forms) emit the immediate form, otherwise
   materialise the offset in the r8 scratch register and use the
   register form. */

static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st16(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st16_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st8(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}
844

    
845
/* Emit an unconditional-destination jump to an absolute address.
   Only the direct-branch case is currently implemented; targets out
   of B's +/-32MB range abort (the #else sequence is a disabled
   sketch of a long-jump fallback). */
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    if (addr & 1) {
        /* goto to a Thumb destination isn't supported */
        tcg_abort();
    }

    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
        tcg_out_b(s, cond, val);
    else {
#if 1
        tcg_abort();
#else
        if (cond == COND_AL) {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
            tcg_out_dat_reg(s, cond, ARITH_ADD,
                            TCG_REG_PC, TCG_REG_PC,
                            TCG_REG_R8, SHIFT_IMM_LSL(0));
        }
#endif
    }
}
873

    
874
/* Emit a call to an absolute address.  Bit 0 of 'addr' marks a Thumb
   target, which needs BLX (ARMv5+); otherwise a plain BL is used.
   Targets out of the +/-32MB branch range abort (the #else branch is
   a disabled sketch of a long-call fallback). */
static inline void tcg_out_call(TCGContext *s, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x02000000 && val - 8 >= -0x02000000) {
        if (addr & 1) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5_instructions) {
                tcg_abort();
            }
            tcg_out_blx_imm(s, val);
        } else {
            tcg_out_bl(s, COND_AL, val);
        }
    } else {
#if 1
        tcg_abort();
#else
        if (cond == COND_AL) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R9, addr);
            tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
                            TCG_REG_PC, SHIFT_IMM_LSL(0));
            tcg_out_bx(s, cond, TCG_REG_R9);
        }
#endif
    }
}
906

    
907
/* Emit an indirect call through register ARG under condition COND.
 * ARMv5+ has BLX which sets LR itself; older cores need an explicit
 * "mov lr, pc" (pc reads as current insn + 8, i.e. the return point)
 * followed by BX.  */
static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
{
    if (!use_armv5_instructions) {
        /* Pre-v5: build the return address by hand, then branch.  */
        tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
                        TCG_REG_PC, SHIFT_IMM_LSL(0));
        tcg_out_bx(s, cond, arg);
        return;
    }
    tcg_out_blx(s, cond, arg);
}
917

    
918
/* Emit a (conditional) branch to a TCG label.  If the label is already
 * resolved, branch directly; otherwise record a relocation so the
 * branch is patched when the label value becomes known.  The 31337
 * value is a dummy addend for tcg_out_reloc.  */
static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value)
        tcg_out_goto(s, cond, l->u.value);
    else if (cond == COND_AL) {
        /* Unconditional: load PC from an inline 32-bit literal that the
         * R_ARM_ABS32 relocation will fill in.  */
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
        s->code_ptr += 4;
    } else {
        /* Probably this should be preferred even for COND_AL... */
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
        tcg_out_b_noaddr(s, cond);
    }
}
934

    
935
#ifdef CONFIG_SOFTMMU

#include "../../softmmu_defs.h"

/* Slow-path load helpers for TLB misses, indexed by log2 of the access
 * size (1, 2, 4, 8 bytes).  */
static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

/* Slow-path store helpers for TLB misses, indexed by log2 of the
 * access size.  */
static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

/* log2 of the byte size of one mem_index row of CPUState.tlb_table
 * (used below to encode mem_index * row-size as a rotated immediate).  */
#define TLB_SHIFT        (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
955

    
956
/* Emit code for a qemu_ld* op: load from guest memory into data_reg
 * (plus data_reg2 for the 64-bit case, opc == 3).  opc bits 0-1 give
 * log2 of the access size; bit 2 set means sign-extend the result.
 * With CONFIG_SOFTMMU an inline TLB lookup is emitted: the fast path
 * runs under COND_EQ after the tag compare, and a miss falls through
 * to a call to qemu_ld_helpers[s_bits].  Without softmmu the access is
 * emitted directly, offset by GUEST_BASE if set.  */
static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2, bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    /* Operand order: data (lo, hi for 64-bit), address (lo, hi for
     * 64-bit guests), then mem_index.  */
    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
#  if CPU_TLB_BITS > 8
#   error
#  endif
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_R8,
                    0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
                    TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    /* Compare the TLB tag against the page part of the address; all
     * fast-path instructions from here on are conditional on EQ.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_read));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment.  */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
#  if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access.  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
#  endif
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    /* TLB hit: perform the load with addend (R1) + guest address.  */
    switch (opc) {
    case 0:
        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 0 | 4:
        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        if (bswap) {
            tcg_out_bswap16(s, COND_EQ, data_reg, data_reg);
        }
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
            tcg_out_bswap16s(s, COND_EQ, data_reg, data_reg);
        } else {
            tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 2:
    default:
        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
        }
        break;
    case 3:
        if (bswap) {
            tcg_out_ld32_rwb(s, COND_EQ, data_reg2, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
            tcg_out_bswap32(s, COND_EQ, data_reg2, data_reg2);
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
        } else {
            tcg_out_ld32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    /* Fast path done: skip the slow-path helper call below.  The branch
     * target is patched in with reloc_pc24 once the slow path is out.  */
    label_ptr = (void *) s->code_ptr;
    tcg_out_b_noaddr(s, COND_EQ);

    /* TODO: move this code to where the constants pool will be */
    /* Slow path: marshal (addr, [addr hi,] mem_index) into the argument
     * registers and call the softmmu load helper.  */
    if (addr_reg != TCG_REG_R0) {
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
    }
# if TARGET_LONG_BITS == 32
    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R1, 0, mem_index);
# else
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
# endif
    tcg_out_call(s, (tcg_target_long) qemu_ld_helpers[s_bits]);

    /* Move/extend the helper's return value (R0, R0:R1 for 64-bit)
     * into the destination registers.  */
    switch (opc) {
    case 0 | 4:
        tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 0:
    case 1:
    case 2:
    default:
        if (data_reg != TCG_REG_R0) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
        }
        break;
    case 3:
        if (data_reg != TCG_REG_R0) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R1) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg2, 0, TCG_REG_R1, SHIFT_IMM_LSL(0));
        }
        break;
    }

    reloc_pc24(label_ptr, (tcg_target_long)s->code_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        /* Add GUEST_BASE to the address, 8-bit rotated chunk at a time
         * (ARM immediates are an 8-bit value rotated by an even count).  */
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
        } else {
            tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 2:
    default:
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 3:
        /* TODO: use block load -
         * check that data_reg2 > data_reg or the other way */
        /* Load the half not clobbering addr_reg first.  */
        if (data_reg == addr_reg) {
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
        } else {
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
        }
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
            tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
        }
        break;
    }
#endif
}
1178

    
1179
/* Emit code for a qemu_st* op: store data_reg (plus data_reg2 for the
 * 64-bit case, opc == 3) to guest memory.  opc bits 0-1 give log2 of
 * the access size.  Mirrors tcg_out_qemu_ld: with CONFIG_SOFTMMU an
 * inline TLB lookup (against addr_write) guards a COND_EQ fast path,
 * and a miss falls through to a call to qemu_st_helpers[s_bits].  */
static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2, bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    /* Operand order: data (lo, hi for 64-bit), address (lo, hi for
     * 64-bit guests), then mem_index.  */
    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
                    TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    /* Compare the TLB tag against the page part of the address; the
     * fast path below is conditional on EQ.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_write));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment.  */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
#  if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access.  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
#  endif
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    /* TLB hit: perform the store with addend (R1) + guest address,
     * byte-swapping through R0 when the guest endianness differs.  */
    switch (opc) {
    case 0:
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st16_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else {
            tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st32_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else {
            tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 3:
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg2);
            tcg_out_st32_rwb(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, addr_reg);
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, 4);
        } else {
            tcg_out_st32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_st32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    /* Fast path done: skip the slow-path helper call below.  The branch
     * target is patched in with reloc_pc24 once the slow path is out.  */
    label_ptr = (void *) s->code_ptr;
    tcg_out_b_noaddr(s, COND_EQ);

    /* TODO: move this code to where the constants pool will be */
    /* Slow path: marshal (addr, [addr hi,] data, mem_index) into the
     * argument registers.  For 64-bit stores mem_index overflows the
     * four register slots and is pushed on the stack (16 bytes, popped
     * again after the call below).  */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    switch (opc) {
    case 0:
        tcg_out_ext8u(s, COND_AL, TCG_REG_R1, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 1:
        tcg_out_ext16u(s, COND_AL, TCG_REG_R1, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 2:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 3:
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R3) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
        }
        break;
    }
# else
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    switch (opc) {
    case 0:
        tcg_out_ext8u(s, COND_AL, TCG_REG_R2, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 1:
        tcg_out_ext16u(s, COND_AL, TCG_REG_R2, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 2:
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 3:
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R3) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
        }
        break;
    }
# endif

    tcg_out_call(s, (tcg_target_long) qemu_st_helpers[s_bits]);
    if (opc == 3)
        /* Pop the 16 bytes pushed for the stacked mem_index argument.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);

    reloc_pc24(label_ptr, (tcg_target_long)s->code_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        /* Add GUEST_BASE to the address, 8-bit rotated chunk at a time
         * (ARM immediates are an 8-bit value rotated by an even count).  */
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R1;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else {
            tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 3:
        /* TODO: use block store -
         * check that data_reg2 > data_reg or the other way */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
        }
        break;
    }
#endif
}
1409

    
1410
/* Address that INDEX_op_exit_tb branches back to (see tcg_out_op);
 * set elsewhere, presumably when the prologue/epilogue is emitted.  */
static uint8_t *tb_ret_addr;
1411

    
1412
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1413
                const TCGArg *args, const int *const_args)
1414
{
1415
    int c;
1416

    
1417
    switch (opc) {
1418
    case INDEX_op_exit_tb:
1419
        {
1420
            uint8_t *ld_ptr = s->code_ptr;
1421
            if (args[0] >> 8)
1422
                tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
1423
            else
1424
                tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
1425
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
1426
            if (args[0] >> 8) {
1427
                *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
1428
                tcg_out32(s, args[0]);
1429
            }
1430
        }
1431
        break;
1432
    case INDEX_op_goto_tb:
1433
        if (s->tb_jmp_offset) {
1434
            /* Direct jump method */
1435
#if defined(USE_DIRECT_JUMP)
1436
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1437
            tcg_out_b_noaddr(s, COND_AL);
1438
#else
1439
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
1440
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1441
            tcg_out32(s, 0);
1442
#endif
1443
        } else {
1444
            /* Indirect jump method */
1445
#if 1
1446
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
1447
            if (c > 0xfff || c < -0xfff) {
1448
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
1449
                                (tcg_target_long) (s->tb_next + args[0]));
1450
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
1451
            } else
1452
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
1453
#else
1454
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
1455
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
1456
            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
1457
#endif
1458
        }
1459
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1460
        break;
1461
    case INDEX_op_call:
1462
        if (const_args[0])
1463
            tcg_out_call(s, args[0]);
1464
        else
1465
            tcg_out_callr(s, COND_AL, args[0]);
1466
        break;
1467
    case INDEX_op_jmp:
1468
        if (const_args[0])
1469
            tcg_out_goto(s, COND_AL, args[0]);
1470
        else
1471
            tcg_out_bx(s, COND_AL, args[0]);
1472
        break;
1473
    case INDEX_op_br:
1474
        tcg_out_goto_label(s, COND_AL, args[0]);
1475
        break;
1476

    
1477
    case INDEX_op_ld8u_i32:
1478
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
1479
        break;
1480
    case INDEX_op_ld8s_i32:
1481
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
1482
        break;
1483
    case INDEX_op_ld16u_i32:
1484
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
1485
        break;
1486
    case INDEX_op_ld16s_i32:
1487
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
1488
        break;
1489
    case INDEX_op_ld_i32:
1490
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
1491
        break;
1492
    case INDEX_op_st8_i32:
1493
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
1494
        break;
1495
    case INDEX_op_st16_i32:
1496
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
1497
        break;
1498
    case INDEX_op_st_i32:
1499
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
1500
        break;
1501

    
1502
    case INDEX_op_mov_i32:
1503
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1504
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
1505
        break;
1506
    case INDEX_op_movi_i32:
1507
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
1508
        break;
1509
    case INDEX_op_add_i32:
1510
        c = ARITH_ADD;
1511
        goto gen_arith;
1512
    case INDEX_op_sub_i32:
1513
        c = ARITH_SUB;
1514
        goto gen_arith;
1515
    case INDEX_op_and_i32:
1516
        c = ARITH_AND;
1517
        goto gen_arith;
1518
    case INDEX_op_andc_i32:
1519
        c = ARITH_BIC;
1520
        goto gen_arith;
1521
    case INDEX_op_or_i32:
1522
        c = ARITH_ORR;
1523
        goto gen_arith;
1524
    case INDEX_op_xor_i32:
1525
        c = ARITH_EOR;
1526
        /* Fall through.  */
1527
    gen_arith:
1528
        if (const_args[2]) {
1529
            int rot;
1530
            rot = encode_imm(args[2]);
1531
            tcg_out_dat_imm(s, COND_AL, c,
1532
                            args[0], args[1], rotl(args[2], rot) | (rot << 7));
1533
        } else
1534
            tcg_out_dat_reg(s, COND_AL, c,
1535
                            args[0], args[1], args[2], SHIFT_IMM_LSL(0));
1536
        break;
1537
    case INDEX_op_add2_i32:
1538
        tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
1539
                        args[0], args[1], args[2], args[3],
1540
                        args[4], args[5], SHIFT_IMM_LSL(0));
1541
        break;
1542
    case INDEX_op_sub2_i32:
1543
        tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
1544
                        args[0], args[1], args[2], args[3],
1545
                        args[4], args[5], SHIFT_IMM_LSL(0));
1546
        break;
1547
    case INDEX_op_neg_i32:
1548
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
1549
        break;
1550
    case INDEX_op_not_i32:
1551
        tcg_out_dat_reg(s, COND_AL,
1552
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
1553
        break;
1554
    case INDEX_op_mul_i32:
1555
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
1556
        break;
1557
    case INDEX_op_mulu2_i32:
1558
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1559
        break;
1560
    /* XXX: Perhaps args[2] & 0x1f is wrong */
1561
    case INDEX_op_shl_i32:
1562
        c = const_args[2] ?
1563
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
1564
        goto gen_shift32;
1565
    case INDEX_op_shr_i32:
1566
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
1567
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
1568
        goto gen_shift32;
1569
    case INDEX_op_sar_i32:
1570
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
1571
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
1572
        goto gen_shift32;
1573
    case INDEX_op_rotr_i32:
1574
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
1575
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
1576
        /* Fall through.  */
1577
    gen_shift32:
1578
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
1579
        break;
1580

    
1581
    case INDEX_op_rotl_i32:
1582
        if (const_args[2]) {
1583
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
1584
                            ((0x20 - args[2]) & 0x1f) ?
1585
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
1586
                            SHIFT_IMM_LSL(0));
1587
        } else {
1588
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_R8, args[1], 0x20);
1589
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
1590
                            SHIFT_REG_ROR(TCG_REG_R8));
1591
        }
1592
        break;
1593

    
1594
    case INDEX_op_brcond_i32:
1595
        if (const_args[1]) {
1596
            int rot;
1597
            rot = encode_imm(args[1]);
1598
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
1599
                            args[0], rotl(args[1], rot) | (rot << 7));
1600
        } else {
1601
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1602
                            args[0], args[1], SHIFT_IMM_LSL(0));
1603
        }
1604
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
1605
        break;
1606
    case INDEX_op_brcond2_i32:
1607
        /* The resulting conditions are:
1608
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
1609
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) ||  a1 != a3,
1610
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) ||  a1 <  a3,
1611
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
1612
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
1613
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) ||  a1 >  a3,
1614
         */
1615
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1616
                        args[1], args[3], SHIFT_IMM_LSL(0));
1617
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
1618
                        args[0], args[2], SHIFT_IMM_LSL(0));
1619
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
1620
        break;
1621
    case INDEX_op_setcond_i32:
1622
        if (const_args[2]) {
1623
            int rot;
1624
            rot = encode_imm(args[2]);
1625
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
1626
                            args[1], rotl(args[2], rot) | (rot << 7));
1627
        } else {
1628
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1629
                            args[1], args[2], SHIFT_IMM_LSL(0));
1630
        }
1631
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
1632
                        ARITH_MOV, args[0], 0, 1);
1633
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
1634
                        ARITH_MOV, args[0], 0, 0);
1635
        break;
1636
    case INDEX_op_setcond2_i32:
1637
        /* See brcond2_i32 comment */
1638
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1639
                        args[2], args[4], SHIFT_IMM_LSL(0));
1640
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
1641
                        args[1], args[3], SHIFT_IMM_LSL(0));
1642
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
1643
                        ARITH_MOV, args[0], 0, 1);
1644
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
1645
                        ARITH_MOV, args[0], 0, 0);
1646
        break;
1647

    
1648
    case INDEX_op_qemu_ld8u:
1649
        tcg_out_qemu_ld(s, args, 0);
1650
        break;
1651
    case INDEX_op_qemu_ld8s:
1652
        tcg_out_qemu_ld(s, args, 0 | 4);
1653
        break;
1654
    case INDEX_op_qemu_ld16u:
1655
        tcg_out_qemu_ld(s, args, 1);
1656
        break;
1657
    case INDEX_op_qemu_ld16s:
1658
        tcg_out_qemu_ld(s, args, 1 | 4);
1659
        break;
1660
    case INDEX_op_qemu_ld32:
1661
        tcg_out_qemu_ld(s, args, 2);
1662
        break;
1663
    case INDEX_op_qemu_ld64:
1664
        tcg_out_qemu_ld(s, args, 3);
1665
        break;
1666

    
1667
    case INDEX_op_qemu_st8:
1668
        tcg_out_qemu_st(s, args, 0);
1669
        break;
1670
    case INDEX_op_qemu_st16:
1671
        tcg_out_qemu_st(s, args, 1);
1672
        break;
1673
    case INDEX_op_qemu_st32:
1674
        tcg_out_qemu_st(s, args, 2);
1675
        break;
1676
    case INDEX_op_qemu_st64:
1677
        tcg_out_qemu_st(s, args, 3);
1678
        break;
1679

    
1680
    case INDEX_op_bswap16_i32:
1681
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
1682
        break;
1683
    case INDEX_op_bswap32_i32:
1684
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
1685
        break;
1686

    
1687
    case INDEX_op_ext8s_i32:
1688
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
1689
        break;
1690
    case INDEX_op_ext16s_i32:
1691
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
1692
        break;
1693
    case INDEX_op_ext16u_i32:
1694
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
1695
        break;
1696

    
1697
    default:
1698
        tcg_abort();
1699
    }
1700
}
1701

    
1702
static const TCGTargetOpDef arm_op_defs[] = {
1703
    { INDEX_op_exit_tb, { } },
1704
    { INDEX_op_goto_tb, { } },
1705
    { INDEX_op_call, { "ri" } },
1706
    { INDEX_op_jmp, { "ri" } },
1707
    { INDEX_op_br, { } },
1708

    
1709
    { INDEX_op_mov_i32, { "r", "r" } },
1710
    { INDEX_op_movi_i32, { "r" } },
1711

    
1712
    { INDEX_op_ld8u_i32, { "r", "r" } },
1713
    { INDEX_op_ld8s_i32, { "r", "r" } },
1714
    { INDEX_op_ld16u_i32, { "r", "r" } },
1715
    { INDEX_op_ld16s_i32, { "r", "r" } },
1716
    { INDEX_op_ld_i32, { "r", "r" } },
1717
    { INDEX_op_st8_i32, { "r", "r" } },
1718
    { INDEX_op_st16_i32, { "r", "r" } },
1719
    { INDEX_op_st_i32, { "r", "r" } },
1720

    
1721
    /* TODO: "r", "r", "ri" */
1722
    { INDEX_op_add_i32, { "r", "r", "rI" } },
1723
    { INDEX_op_sub_i32, { "r", "r", "rI" } },
1724
    { INDEX_op_mul_i32, { "r", "r", "r" } },
1725
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
1726
    { INDEX_op_and_i32, { "r", "r", "rI" } },
1727
    { INDEX_op_andc_i32, { "r", "r", "rI" } },
1728
    { INDEX_op_or_i32, { "r", "r", "rI" } },
1729
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
1730
    { INDEX_op_neg_i32, { "r", "r" } },
1731
    { INDEX_op_not_i32, { "r", "r" } },
1732

    
1733
    { INDEX_op_shl_i32, { "r", "r", "ri" } },
1734
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
1735
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
1736
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
1737
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },
1738

    
1739
    { INDEX_op_brcond_i32, { "r", "rI" } },
1740
    { INDEX_op_setcond_i32, { "r", "r", "rI" } },
1741

    
1742
    /* TODO: "r", "r", "r", "r", "ri", "ri" */
1743
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
1744
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
1745
    { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
1746
    { INDEX_op_setcond2_i32, { "r", "r", "r", "r", "r" } },
1747

    
1748
#if TARGET_LONG_BITS == 32
1749
    { INDEX_op_qemu_ld8u, { "r", "l" } },
1750
    { INDEX_op_qemu_ld8s, { "r", "l" } },
1751
    { INDEX_op_qemu_ld16u, { "r", "l" } },
1752
    { INDEX_op_qemu_ld16s, { "r", "l" } },
1753
    { INDEX_op_qemu_ld32, { "r", "l" } },
1754
    { INDEX_op_qemu_ld64, { "L", "L", "l" } },
1755

    
1756
    { INDEX_op_qemu_st8, { "s", "s" } },
1757
    { INDEX_op_qemu_st16, { "s", "s" } },
1758
    { INDEX_op_qemu_st32, { "s", "s" } },
1759
    { INDEX_op_qemu_st64, { "S", "S", "s" } },
1760
#else
1761
    { INDEX_op_qemu_ld8u, { "r", "l", "l" } },
1762
    { INDEX_op_qemu_ld8s, { "r", "l", "l" } },
1763
    { INDEX_op_qemu_ld16u, { "r", "l", "l" } },
1764
    { INDEX_op_qemu_ld16s, { "r", "l", "l" } },
1765
    { INDEX_op_qemu_ld32, { "r", "l", "l" } },
1766
    { INDEX_op_qemu_ld64, { "L", "L", "l", "l" } },
1767

    
1768
    { INDEX_op_qemu_st8, { "s", "s", "s" } },
1769
    { INDEX_op_qemu_st16, { "s", "s", "s" } },
1770
    { INDEX_op_qemu_st32, { "s", "s", "s" } },
1771
    { INDEX_op_qemu_st64, { "S", "S", "s", "s" } },
1772
#endif
1773

    
1774
    { INDEX_op_bswap16_i32, { "r", "r" } },
1775
    { INDEX_op_bswap32_i32, { "r", "r" } },
1776

    
1777
    { INDEX_op_ext8s_i32, { "r", "r" } },
1778
    { INDEX_op_ext16s_i32, { "r", "r" } },
1779
    { INDEX_op_ext16u_i32, { "r", "r" } },
1780

    
1781
    { -1 },
1782
};
1783

    
1784
/* One-time backend initialisation: declare which registers TCG may
   allocate, which are clobbered by calls, which are reserved, and
   register the opcode constraint table above.  */
static void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe: the TLB fast path hard-codes the entry size.  */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    /* All sixteen core registers r0-r15 are visible for TCG_TYPE_I32
       (pc and friends are carved out via reserved_regs below).  */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    /* AAPCS caller-saved registers: r0-r3, ip (r12) and lr (r14).  */
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
                     (1 << TCG_REG_R1) |
                     (1 << TCG_REG_R2) |
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R12) |
                     (1 << TCG_REG_R14));

    /* Registers the allocator must never hand out: the stack pointer,
       r8 (reserved by this backend), and the program counter.  */
    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);

    tcg_add_target_add_op_defs(arm_op_defs);
}
1808

    
1809
/* Load a 32-bit value from memory at arg1 + arg2 into register arg.
   'type' is unused: TCG_TYPE_I32 is the only type on this 32-bit host.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}
1814

    
1815
/* Store the 32-bit register arg to memory at arg1 + arg2.
   'type' is unused: TCG_TYPE_I32 is the only type on this 32-bit host.  */
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}
1820

    
1821
/* Emit code adding the immediate 'val' to register 'reg' in place.
   Only magnitudes below 0x100 fit the single ADD/SUB immediate
   encodings used here; anything larger aborts.  val == 0 emits
   nothing.  (Braces added around the nested if/else: the original
   unbraced form parsed correctly but invited a dangling-else bug.)  */
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val > 0) {
        if (val < 0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
        else
            tcg_abort();
    } else if (val < 0) {
        if (val > -0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
        else
            tcg_abort();
    }
}
1835

    
1836
/* Register-to-register move: MOV ret, arg (condition AL, no shift).
   'type' is unused on this 32-bit-only host.  */
static inline void tcg_out_mov(TCGContext *s, TCGType type, int ret, int arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}
1840

    
1841
/* Load the constant 'arg' into register 'ret'; tcg_out_movi32 picks
   the cheapest encoding.  'type' is unused on this 32-bit-only host.  */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                int ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
1846

    
1847
/* Emit the prologue/epilogue pair that brackets generated code.
   Prologue: push the callee-saved registers, then branch to the
   translated-code address passed in r0.  tb_ret_addr marks the
   epilogue, which pops the same registers and returns by loading pc.
   The two opaque words are pre-encoded STMDB/LDMIA instructions
   (register mask 0x4f70 = r4-r6, r8-r11 plus lr/pc).  */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* There is no need to save r7, it is used to store the address
       of the env structure and is not modified by GCC. */

    /* stmdb sp!, { r4 - r6, r8 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4f70);

    /* Jump into the generated code; its address arrives in r0.  */
    tcg_out_bx(s, COND_AL, TCG_REG_R0);
    tb_ret_addr = s->code_ptr;

    /* ldmia sp!, { r4 - r6, r8 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8f70);
}