tcg/arm/tcg-target.c @ 914ccf51


1
/*
2
 * Tiny Code Generator for QEMU
3
 *
4
 * Copyright (c) 2008 Andrzej Zaborowski
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7
 * of this software and associated documentation files (the "Software"), to deal
8
 * in the Software without restriction, including without limitation the rights
9
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
 * copies of the Software, and to permit persons to whom the Software is
11
 * furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22
 * THE SOFTWARE.
23
 */
24

    
25
#if defined(__ARM_ARCH_7__) ||  \
26
    defined(__ARM_ARCH_7A__) || \
27
    defined(__ARM_ARCH_7EM__) || \
28
    defined(__ARM_ARCH_7M__) || \
29
    defined(__ARM_ARCH_7R__)
30
#define USE_ARMV7_INSTRUCTIONS
31
#endif
32

    
33
#if defined(USE_ARMV7_INSTRUCTIONS) || \
34
    defined(__ARM_ARCH_6J__) || \
35
    defined(__ARM_ARCH_6K__) || \
36
    defined(__ARM_ARCH_6T2__) || \
37
    defined(__ARM_ARCH_6Z__) || \
38
    defined(__ARM_ARCH_6ZK__)
39
#define USE_ARMV6_INSTRUCTIONS
40
#endif
41

    
42
#if defined(USE_ARMV6_INSTRUCTIONS) || \
43
    defined(__ARM_ARCH_5T__) || \
44
    defined(__ARM_ARCH_5TE__) || \
45
    defined(__ARM_ARCH_5TEJ__)
46
#define USE_ARMV5_INSTRUCTIONS
47
#endif
48

    
49
#ifdef USE_ARMV5_INSTRUCTIONS
50
static const int use_armv5_instructions = 1;
51
#else
52
static const int use_armv5_instructions = 0;
53
#endif
54
#undef USE_ARMV5_INSTRUCTIONS
55

    
56
#ifdef USE_ARMV6_INSTRUCTIONS
57
static const int use_armv6_instructions = 1;
58
#else
59
static const int use_armv6_instructions = 0;
60
#endif
61
#undef USE_ARMV6_INSTRUCTIONS
62

    
63
#ifdef USE_ARMV7_INSTRUCTIONS
64
static const int use_armv7_instructions = 1;
65
#else
66
static const int use_armv7_instructions = 0;
67
#endif
68
#undef USE_ARMV7_INSTRUCTIONS
69

    
70
#ifndef NDEBUG
71
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
72
    "%r0",
73
    "%r1",
74
    "%r2",
75
    "%r3",
76
    "%r4",
77
    "%r5",
78
    "%r6",
79
    "%r7",
80
    "%r8",
81
    "%r9",
82
    "%r10",
83
    "%r11",
84
    "%r12",
85
    "%r13",
86
    "%r14",
87
    "%pc",
88
};
89
#endif
90

    
91
static const int tcg_target_reg_alloc_order[] = {
92
    TCG_REG_R4,
93
    TCG_REG_R5,
94
    TCG_REG_R6,
95
    TCG_REG_R7,
96
    TCG_REG_R8,
97
    TCG_REG_R9,
98
    TCG_REG_R10,
99
    TCG_REG_R11,
100
    TCG_REG_R13,
101
    TCG_REG_R0,
102
    TCG_REG_R1,
103
    TCG_REG_R2,
104
    TCG_REG_R3,
105
    TCG_REG_R12,
106
    TCG_REG_R14,
107
};
108

    
109
static const int tcg_target_call_iarg_regs[4] = {
110
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
111
};
112
static const int tcg_target_call_oarg_regs[2] = {
113
    TCG_REG_R0, TCG_REG_R1
114
};
115

    
116
static void patch_reloc(uint8_t *code_ptr, int type,
117
                tcg_target_long value, tcg_target_long addend)
118
{
119
    switch (type) {
120
    case R_ARM_ABS32:
121
        *(uint32_t *) code_ptr = value;
122
        break;
123

    
124
    case R_ARM_CALL:
125
    case R_ARM_JUMP24:
126
    default:
127
        tcg_abort();
128

    
129
    case R_ARM_PC24:
130
        *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & 0xff000000) |
131
                (((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff);
132
        break;
133
    }
134
}
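
/* Note on R_ARM_PC24: B/BL instructions hold a signed 24-bit word offset
 * relative to the instruction address plus 8 (the value the PC reads as),
 * so patch_reloc stores ((value - (code_ptr + 8)) >> 2) in the low 24 bits
 * and keeps the condition/opcode bits in the top byte.  For example, a
 * branch to the very next instruction (value == code_ptr + 4) ends up
 * encoded as offset -1, i.e. 0xffffff.
 */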
135

    
136
/* maximum number of registers used for input function arguments */
137
static inline int tcg_target_get_call_iarg_regs_count(int flags)
138
{
139
    return 4;
140
}
141

    
142
/* parse target specific constraints */
143
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
144
{
145
    const char *ct_str;
146

    
147
    ct_str = *pct_str;
148
    switch (ct_str[0]) {
149
    case 'I':
150
         ct->ct |= TCG_CT_CONST_ARM;
151
         break;
152

    
153
    case 'r':
154
        ct->ct |= TCG_CT_REG;
155
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
156
        break;
157

    
158
    /* qemu_ld address */
159
    case 'l':
160
        ct->ct |= TCG_CT_REG;
161
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
162
#ifdef CONFIG_SOFTMMU
163
        /* r0 and r1 will be overwritten when reading the tlb entry,
164
           so don't use these. */
165
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
166
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
167
#endif
168
        break;
169
    case 'L':
170
        ct->ct |= TCG_CT_REG;
171
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
172
#ifdef CONFIG_SOFTMMU
173
        /* r1 is still needed to load data_reg or data_reg2,
174
           so don't use it. */
175
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
176
#endif
177
        break;
178

    
179
    /* qemu_st address & data_reg */
180
    case 's':
181
        ct->ct |= TCG_CT_REG;
182
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
183
        /* r0 and r1 will be overwritten when reading the tlb entry
184
           (softmmu only) and doing the byte swapping, so don't
185
           use these. */
186
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
187
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
188
        break;
189
    /* qemu_st64 data_reg2 */
190
    case 'S':
191
        ct->ct |= TCG_CT_REG;
192
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
193
        /* r0 and r1 will be overwritten when reading the tlb entry
194
            (softmmu only) and doing the byte swapping, so don't
195
            use these. */
196
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
197
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
198
#ifdef CONFIG_SOFTMMU
199
        /* r2 is still needed to load data_reg, so don't use it. */
200
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
201
#endif
202
        break;
203

    
204
    default:
205
        return -1;
206
    }
207
    ct_str++;
208
    *pct_str = ct_str;
209

    
210
    return 0;
211
}
212

    
213
static inline uint32_t rotl(uint32_t val, int n)
214
{
215
  return (val << n) | (val >> (32 - n));
216
}
217

    
218
/* ARM immediates for ALU instructions are made of an unsigned 8-bit
219
   value right-rotated by an even amount between 0 and 30. */
220
static inline int encode_imm(uint32_t imm)
221
{
222
    int shift;
223

    
224
    /* simple case, only lower bits */
225
    if ((imm & ~0xff) == 0)
226
        return 0;
227
    /* then try a simple even shift */
228
    shift = ctz32(imm) & ~1;
229
    if (((imm >> shift) & ~0xff) == 0)
230
        return 32 - shift;
231
    /* now try harder with rotations */
232
    if ((rotl(imm, 2) & ~0xff) == 0)
233
        return 2;
234
    if ((rotl(imm, 4) & ~0xff) == 0)
235
        return 4;
236
    if ((rotl(imm, 6) & ~0xff) == 0)
237
        return 6;
238
    /* imm can't be encoded */
239
    return -1;
240
}
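
/* Example: encode_imm(0x00ff0000) returns 16, since rotating the value
 * left by 16 leaves 0xff.  Callers such as the gen_arith path below emit
 * rotl(imm, rot) | (rot << 7), i.e. imm8 = 0xff with a rotate field of 8,
 * which the CPU decodes back as 0xff ror 16 == 0x00ff0000.
 */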
241

    
242
static inline int check_fit_imm(uint32_t imm)
243
{
244
    return encode_imm(imm) >= 0;
245
}
246

    
247
/* Test if a constant matches the constraint.
248
 * TODO: define constraints for:
249
 *
250
 * ldr/str offset:   between -0xfff and 0xfff
251
 * ldrh/strh offset: between -0xff and 0xff
252
 * mov operand2:     values represented with x << (2 * y), x < 0x100
253
 * add, sub, eor...: ditto
254
 */
255
static inline int tcg_target_const_match(tcg_target_long val,
256
                const TCGArgConstraint *arg_ct)
257
{
258
    int ct;
259
    ct = arg_ct->ct;
260
    if (ct & TCG_CT_CONST)
261
        return 1;
262
    else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
263
        return 1;
264
    else
265
        return 0;
266
}
267

    
268
enum arm_data_opc_e {
269
    ARITH_AND = 0x0,
270
    ARITH_EOR = 0x1,
271
    ARITH_SUB = 0x2,
272
    ARITH_RSB = 0x3,
273
    ARITH_ADD = 0x4,
274
    ARITH_ADC = 0x5,
275
    ARITH_SBC = 0x6,
276
    ARITH_RSC = 0x7,
277
    ARITH_TST = 0x8,
278
    ARITH_CMP = 0xa,
279
    ARITH_CMN = 0xb,
280
    ARITH_ORR = 0xc,
281
    ARITH_MOV = 0xd,
282
    ARITH_BIC = 0xe,
283
    ARITH_MVN = 0xf,
284
};
285

    
286
#define TO_CPSR(opc) \
287
  ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)
288

    
289
#define SHIFT_IMM_LSL(im)        (((im) << 7) | 0x00)
290
#define SHIFT_IMM_LSR(im)        (((im) << 7) | 0x20)
291
#define SHIFT_IMM_ASR(im)        (((im) << 7) | 0x40)
292
#define SHIFT_IMM_ROR(im)        (((im) << 7) | 0x60)
293
#define SHIFT_REG_LSL(rs)        (((rs) << 8) | 0x10)
294
#define SHIFT_REG_LSR(rs)        (((rs) << 8) | 0x30)
295
#define SHIFT_REG_ASR(rs)        (((rs) << 8) | 0x50)
296
#define SHIFT_REG_ROR(rs)        (((rs) << 8) | 0x70)
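
/* The macros above build the shifter-operand field of a data-processing
 * instruction: bits [11:7] hold an immediate shift amount (or bits [11:8]
 * the shift register Rs, with bit 4 set), and bits [6:5] select the shift
 * type (00 LSL, 01 LSR, 10 ASR, 11 ROR).  E.g. SHIFT_IMM_LSR(8) == 0x420,
 * the encoding of "lsr #8".
 */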
297

    
298
enum arm_cond_code_e {
299
    COND_EQ = 0x0,
300
    COND_NE = 0x1,
301
    COND_CS = 0x2,        /* Unsigned greater or equal */
302
    COND_CC = 0x3,        /* Unsigned less than */
303
    COND_MI = 0x4,        /* Negative */
304
    COND_PL = 0x5,        /* Zero or greater */
305
    COND_VS = 0x6,        /* Overflow */
306
    COND_VC = 0x7,        /* No overflow */
307
    COND_HI = 0x8,        /* Unsigned greater than */
308
    COND_LS = 0x9,        /* Unsigned less or equal */
309
    COND_GE = 0xa,
310
    COND_LT = 0xb,
311
    COND_GT = 0xc,
312
    COND_LE = 0xd,
313
    COND_AL = 0xe,
314
};
315

    
316
static const uint8_t tcg_cond_to_arm_cond[10] = {
317
    [TCG_COND_EQ] = COND_EQ,
318
    [TCG_COND_NE] = COND_NE,
319
    [TCG_COND_LT] = COND_LT,
320
    [TCG_COND_GE] = COND_GE,
321
    [TCG_COND_LE] = COND_LE,
322
    [TCG_COND_GT] = COND_GT,
323
    /* unsigned */
324
    [TCG_COND_LTU] = COND_CC,
325
    [TCG_COND_GEU] = COND_CS,
326
    [TCG_COND_LEU] = COND_LS,
327
    [TCG_COND_GTU] = COND_HI,
328
};
329

    
330
static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
331
{
332
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
333
}
334

    
335
static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
336
{
337
    tcg_out32(s, (cond << 28) | 0x0a000000 |
338
                    (((offset - 8) >> 2) & 0x00ffffff));
339
}
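
/* The "offset - 8" above accounts for the ARM pipeline: when a branch
 * executes, the PC already reads as the branch address plus 8, so the
 * encoded word offset is taken relative to that point.
 */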
340

    
341
static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
342
{
343
#ifdef HOST_WORDS_BIGENDIAN
344
    tcg_out8(s, (cond << 4) | 0x0a);
345
    s->code_ptr += 3;
346
#else
347
    s->code_ptr += 3;
348
    tcg_out8(s, (cond << 4) | 0x0a);
349
#endif
350
}
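
/* tcg_out_b_noaddr writes only the high byte of the branch (condition
 * plus the 0xa opcode nibble) and leaves the 24-bit offset untouched, to
 * be filled in later by an R_ARM_PC24 relocation; which end of the word
 * that byte lives at depends on the host byte order.
 */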
351

    
352
static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
353
{
354
    tcg_out32(s, (cond << 28) | 0x0b000000 |
355
                    (((offset - 8) >> 2) & 0x00ffffff));
356
}
357

    
358
static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
359
{
360
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
361
}
362

    
363
static inline void tcg_out_dat_reg(TCGContext *s,
364
                int cond, int opc, int rd, int rn, int rm, int shift)
365
{
366
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
367
                    (rn << 16) | (rd << 12) | shift | rm);
368
}
369

    
370
static inline void tcg_out_dat_reg2(TCGContext *s,
371
                int cond, int opc0, int opc1, int rd0, int rd1,
372
                int rn0, int rn1, int rm0, int rm1, int shift)
373
{
374
    if (rd0 == rn1 || rd0 == rm1) {
375
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
376
                        (rn0 << 16) | (8 << 12) | shift | rm0);
377
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
378
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
379
        tcg_out_dat_reg(s, cond, ARITH_MOV,
380
                        rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
381
    } else {
382
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
383
                        (rn0 << 16) | (rd0 << 12) | shift | rm0);
384
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
385
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
386
    }
387
}
388

    
389
static inline void tcg_out_dat_imm(TCGContext *s,
390
                int cond, int opc, int rd, int rn, int im)
391
{
392
    tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
393
                    (rn << 16) | (rd << 12) | im);
394
}
395

    
396
static inline void tcg_out_movi32(TCGContext *s,
397
                int cond, int rd, int32_t arg)
398
{
399
    int offset = (uint32_t) arg - ((uint32_t) s->code_ptr + 8);
400

    
401
    /* TODO: This is very suboptimal, we can easily have a constant
402
     * pool somewhere after all the instructions.  */
403

    
404
    if (arg < 0 && arg > -0x100)
405
        return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);
406

    
407
    if (offset < 0x100 && offset > -0x100)
408
        return offset >= 0 ?
409
                tcg_out_dat_imm(s, cond, ARITH_ADD, rd, 15, offset) :
410
                tcg_out_dat_imm(s, cond, ARITH_SUB, rd, 15, -offset);
411

    
412
    if (use_armv7_instructions) {
413
        /* use movw/movt */
414
        /* movw */
415
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
416
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
417
        if (arg & 0xffff0000)
418
            /* movt */
419
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
420
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
421
    } else {
422
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff);
423
        if (arg & 0x0000ff00)
424
            tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
425
                            ((arg >>  8) & 0xff) | 0xc00);
426
        if (arg & 0x00ff0000)
427
            tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
428
                            ((arg >> 16) & 0xff) | 0x800);
429
        if (arg & 0xff000000)
430
            tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
431
                            ((arg >> 24) & 0xff) | 0x400);
432
        }
433
}
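
/* For illustration, tcg_out_movi32(s, COND_AL, rd, 0x12345678) emits
 *   movw rd, #0x5678; movt rd, #0x1234              on ARMv7, or
 *   mov rd, #0x78; orr rd, rd, #0x5600;
 *   orr rd, rd, #0x340000; orr rd, rd, #0x12000000  otherwise
 * (assuming neither short form above applies); the 0xc00/0x800/0x400
 * constants are the rotate fields that place each byte at bit 8, 16
 * and 24.
 */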
434

    
435
static inline void tcg_out_mul32(TCGContext *s,
436
                int cond, int rd, int rs, int rm)
437
{
438
    if (rd != rm)
439
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
440
                        (rs << 8) | 0x90 | rm);
441
    else if (rd != rs)
442
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
443
                        (rm << 8) | 0x90 | rs);
444
    else {
445
        tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
446
                        (rs << 8) | 0x90 | rm);
447
        tcg_out_dat_reg(s, cond, ARITH_MOV,
448
                        rd, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
449
    }
450
}
451

    
452
static inline void tcg_out_umull32(TCGContext *s,
453
                int cond, int rd0, int rd1, int rs, int rm)
454
{
455
    if (rd0 != rm && rd1 != rm)
456
        tcg_out32(s, (cond << 28) | 0x800090 |
457
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
458
    else if (rd0 != rs && rd1 != rs)
459
        tcg_out32(s, (cond << 28) | 0x800090 |
460
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
461
    else {
462
        tcg_out_dat_reg(s, cond, ARITH_MOV,
463
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
464
        tcg_out32(s, (cond << 28) | 0x800098 |
465
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
466
    }
467
}
468

    
469
static inline void tcg_out_smull32(TCGContext *s,
470
                int cond, int rd0, int rd1, int rs, int rm)
471
{
472
    if (rd0 != rm && rd1 != rm)
473
        tcg_out32(s, (cond << 28) | 0xc00090 |
474
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
475
    else if (rd0 != rs && rd1 != rs)
476
        tcg_out32(s, (cond << 28) | 0xc00090 |
477
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
478
    else {
479
        tcg_out_dat_reg(s, cond, ARITH_MOV,
480
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
481
        tcg_out32(s, (cond << 28) | 0xc00098 |
482
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
483
    }
484
}
485

    
486
static inline void tcg_out_ext8s(TCGContext *s, int cond,
487
                                 int rd, int rn)
488
{
489
    if (use_armv6_instructions) {
490
        /* sxtb */
491
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
492
    } else {
493
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
494
                        rd, 0, rn, SHIFT_IMM_LSL(24));
495
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
496
                        rd, 0, rd, SHIFT_IMM_ASR(24));
497
    }
498
}
499

    
500
static inline void tcg_out_ext8u(TCGContext *s, int cond,
501
                                 int rd, int rn)
502
{
503
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
504
}
505

    
506
static inline void tcg_out_ext16s(TCGContext *s, int cond,
507
                                  int rd, int rn)
508
{
509
    if (use_armv6_instructions) {
510
        /* sxth */
511
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
512
    } else {
513
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
514
                        rd, 0, rn, SHIFT_IMM_LSL(16));
515
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
516
                        rd, 0, rd, SHIFT_IMM_ASR(16));
517
    }
518
}
519

    
520
static inline void tcg_out_ext16u(TCGContext *s, int cond,
521
                                  int rd, int rn)
522
{
523
    if (use_armv6_instructions) {
524
        /* uxth */
525
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
526
    } else {
527
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
528
                        rd, 0, rn, SHIFT_IMM_LSL(16));
529
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
530
                        rd, 0, rd, SHIFT_IMM_LSR(16));
531
    }
532
}
533

    
534
static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
535
{
536
    if (use_armv6_instructions) {
537
        /* revsh */
538
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
539
    } else {
540
        tcg_out_dat_reg(s, cond, ARITH_MOV,
541
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
542
        tcg_out_dat_reg(s, cond, ARITH_MOV,
543
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_ASR(16));
544
        tcg_out_dat_reg(s, cond, ARITH_ORR,
545
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
546
    }
547
}
548

    
549
static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
550
{
551
    if (use_armv6_instructions) {
552
        /* rev16 */
553
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
554
    } else {
555
        tcg_out_dat_reg(s, cond, ARITH_MOV,
556
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
557
        tcg_out_dat_reg(s, cond, ARITH_MOV,
558
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_LSR(16));
559
        tcg_out_dat_reg(s, cond, ARITH_ORR,
560
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
561
    }
562
}
563

    
564
static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
565
{
566
    if (use_armv6_instructions) {
567
        /* rev */
568
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
569
    } else {
570
        tcg_out_dat_reg(s, cond, ARITH_EOR,
571
                        TCG_REG_R8, rn, rn, SHIFT_IMM_ROR(16));
572
        tcg_out_dat_imm(s, cond, ARITH_BIC,
573
                        TCG_REG_R8, TCG_REG_R8, 0xff | 0x800);
574
        tcg_out_dat_reg(s, cond, ARITH_MOV,
575
                        rd, 0, rn, SHIFT_IMM_ROR(8));
576
        tcg_out_dat_reg(s, cond, ARITH_EOR,
577
                        rd, rd, TCG_REG_R8, SHIFT_IMM_LSR(8));
578
    }
579
}
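
/* The pre-ARMv6 path above is the classic four-instruction byte reverse:
 *   eor r8, rn, rn, ror #16
 *   bic r8, r8, #0x00ff0000     @ 0xff | 0x800 encodes 0xff ror 16
 *   mov rd, rn, ror #8
 *   eor rd, rd, r8, lsr #8
 */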
580

    
581
static inline void tcg_out_ld32_12(TCGContext *s, int cond,
582
                int rd, int rn, tcg_target_long im)
583
{
584
    if (im >= 0)
585
        tcg_out32(s, (cond << 28) | 0x05900000 |
586
                        (rn << 16) | (rd << 12) | (im & 0xfff));
587
    else
588
        tcg_out32(s, (cond << 28) | 0x05100000 |
589
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
590
}
591

    
592
static inline void tcg_out_st32_12(TCGContext *s, int cond,
593
                int rd, int rn, tcg_target_long im)
594
{
595
    if (im >= 0)
596
        tcg_out32(s, (cond << 28) | 0x05800000 |
597
                        (rn << 16) | (rd << 12) | (im & 0xfff));
598
    else
599
        tcg_out32(s, (cond << 28) | 0x05000000 |
600
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
601
}
602

    
603
static inline void tcg_out_ld32_r(TCGContext *s, int cond,
604
                int rd, int rn, int rm)
605
{
606
    tcg_out32(s, (cond << 28) | 0x07900000 |
607
                    (rn << 16) | (rd << 12) | rm);
608
}
609

    
610
static inline void tcg_out_st32_r(TCGContext *s, int cond,
611
                int rd, int rn, int rm)
612
{
613
    tcg_out32(s, (cond << 28) | 0x07800000 |
614
                    (rn << 16) | (rd << 12) | rm);
615
}
616

    
617
/* Register pre-increment with base writeback.  */
618
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
619
                int rd, int rn, int rm)
620
{
621
    tcg_out32(s, (cond << 28) | 0x07b00000 |
622
                    (rn << 16) | (rd << 12) | rm);
623
}
624

    
625
static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
626
                int rd, int rn, int rm)
627
{
628
    tcg_out32(s, (cond << 28) | 0x07a00000 |
629
                    (rn << 16) | (rd << 12) | rm);
630
}
631

    
632
static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
633
                int rd, int rn, tcg_target_long im)
634
{
635
    if (im >= 0)
636
        tcg_out32(s, (cond << 28) | 0x01d000b0 |
637
                        (rn << 16) | (rd << 12) |
638
                        ((im & 0xf0) << 4) | (im & 0xf));
639
    else
640
        tcg_out32(s, (cond << 28) | 0x015000b0 |
641
                        (rn << 16) | (rd << 12) |
642
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
643
}
644

    
645
static inline void tcg_out_st16_8(TCGContext *s, int cond,
646
                int rd, int rn, tcg_target_long im)
647
{
648
    if (im >= 0)
649
        tcg_out32(s, (cond << 28) | 0x01c000b0 |
650
                        (rn << 16) | (rd << 12) |
651
                        ((im & 0xf0) << 4) | (im & 0xf));
652
    else
653
        tcg_out32(s, (cond << 28) | 0x014000b0 |
654
                        (rn << 16) | (rd << 12) |
655
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
656
}
657

    
658
static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
659
                int rd, int rn, int rm)
660
{
661
    tcg_out32(s, (cond << 28) | 0x019000b0 |
662
                    (rn << 16) | (rd << 12) | rm);
663
}
664

    
665
static inline void tcg_out_st16_r(TCGContext *s, int cond,
666
                int rd, int rn, int rm)
667
{
668
    tcg_out32(s, (cond << 28) | 0x018000b0 |
669
                    (rn << 16) | (rd << 12) | rm);
670
}
671

    
672
static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
673
                int rd, int rn, tcg_target_long im)
674
{
675
    if (im >= 0)
676
        tcg_out32(s, (cond << 28) | 0x01d000f0 |
677
                        (rn << 16) | (rd << 12) |
678
                        ((im & 0xf0) << 4) | (im & 0xf));
679
    else
680
        tcg_out32(s, (cond << 28) | 0x015000f0 |
681
                        (rn << 16) | (rd << 12) |
682
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
683
}
684

    
685
static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
686
                int rd, int rn, int rm)
687
{
688
    tcg_out32(s, (cond << 28) | 0x019000f0 |
689
                    (rn << 16) | (rd << 12) | rm);
690
}
691

    
692
static inline void tcg_out_ld8_12(TCGContext *s, int cond,
693
                int rd, int rn, tcg_target_long im)
694
{
695
    if (im >= 0)
696
        tcg_out32(s, (cond << 28) | 0x05d00000 |
697
                        (rn << 16) | (rd << 12) | (im & 0xfff));
698
    else
699
        tcg_out32(s, (cond << 28) | 0x05500000 |
700
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
701
}
702

    
703
static inline void tcg_out_st8_12(TCGContext *s, int cond,
704
                int rd, int rn, tcg_target_long im)
705
{
706
    if (im >= 0)
707
        tcg_out32(s, (cond << 28) | 0x05c00000 |
708
                        (rn << 16) | (rd << 12) | (im & 0xfff));
709
    else
710
        tcg_out32(s, (cond << 28) | 0x05400000 |
711
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
712
}
713

    
714
static inline void tcg_out_ld8_r(TCGContext *s, int cond,
715
                int rd, int rn, int rm)
716
{
717
    tcg_out32(s, (cond << 28) | 0x07d00000 |
718
                    (rn << 16) | (rd << 12) | rm);
719
}
720

    
721
static inline void tcg_out_st8_r(TCGContext *s, int cond,
722
                int rd, int rn, int rm)
723
{
724
    tcg_out32(s, (cond << 28) | 0x07c00000 |
725
                    (rn << 16) | (rd << 12) | rm);
726
}
727

    
728
static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
729
                int rd, int rn, tcg_target_long im)
730
{
731
    if (im >= 0)
732
        tcg_out32(s, (cond << 28) | 0x01d000d0 |
733
                        (rn << 16) | (rd << 12) |
734
                        ((im & 0xf0) << 4) | (im & 0xf));
735
    else
736
        tcg_out32(s, (cond << 28) | 0x015000d0 |
737
                        (rn << 16) | (rd << 12) |
738
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
739
}
740

    
741
static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
742
                int rd, int rn, int rm)
743
{
744
    tcg_out32(s, (cond << 28) | 0x019000d0 |
745
                    (rn << 16) | (rd << 12) | rm);
746
}
747

    
748
static inline void tcg_out_ld32u(TCGContext *s, int cond,
749
                int rd, int rn, int32_t offset)
750
{
751
    if (offset > 0xfff || offset < -0xfff) {
752
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
753
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
754
    } else
755
        tcg_out_ld32_12(s, cond, rd, rn, offset);
756
}
757

    
758
static inline void tcg_out_st32(TCGContext *s, int cond,
759
                int rd, int rn, int32_t offset)
760
{
761
    if (offset > 0xfff || offset < -0xfff) {
762
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
763
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
764
    } else
765
        tcg_out_st32_12(s, cond, rd, rn, offset);
766
}
767

    
768
static inline void tcg_out_ld16u(TCGContext *s, int cond,
769
                int rd, int rn, int32_t offset)
770
{
771
    if (offset > 0xff || offset < -0xff) {
772
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
773
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
774
    } else
775
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
776
}
777

    
778
static inline void tcg_out_ld16s(TCGContext *s, int cond,
779
                int rd, int rn, int32_t offset)
780
{
781
    if (offset > 0xff || offset < -0xff) {
782
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
783
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
784
    } else
785
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
786
}
787

    
788
static inline void tcg_out_st16(TCGContext *s, int cond,
789
                int rd, int rn, int32_t offset)
790
{
791
    if (offset > 0xff || offset < -0xff) {
792
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
793
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_R8);
794
    } else
795
        tcg_out_st16_8(s, cond, rd, rn, offset);
796
}
797

    
798
static inline void tcg_out_ld8u(TCGContext *s, int cond,
799
                int rd, int rn, int32_t offset)
800
{
801
    if (offset > 0xfff || offset < -0xfff) {
802
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
803
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
804
    } else
805
        tcg_out_ld8_12(s, cond, rd, rn, offset);
806
}
807

    
808
static inline void tcg_out_ld8s(TCGContext *s, int cond,
809
                int rd, int rn, int32_t offset)
810
{
811
    if (offset > 0xff || offset < -0xff) {
812
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
813
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
814
    } else
815
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
816
}
817

    
818
static inline void tcg_out_st8(TCGContext *s, int cond,
819
                int rd, int rn, int32_t offset)
820
{
821
    if (offset > 0xfff || offset < -0xfff) {
822
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
823
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
824
    } else
825
        tcg_out_st8_12(s, cond, rd, rn, offset);
826
}
827

    
828
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
829
{
830
    int32_t val;
831

    
832
    val = addr - (tcg_target_long) s->code_ptr;
833
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
834
        tcg_out_b(s, cond, val);
835
    else {
836
#if 1
837
        tcg_abort();
838
#else
839
        if (cond == COND_AL) {
840
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
841
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
842
        } else {
843
            tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
844
            tcg_out_dat_reg(s, cond, ARITH_ADD,
845
                            TCG_REG_PC, TCG_REG_PC,
846
                            TCG_REG_R8, SHIFT_IMM_LSL(0));
847
        }
848
#endif
849
    }
850
}
851

    
852
static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
853
{
854
    int32_t val;
855

    
856
    val = addr - (tcg_target_long) s->code_ptr;
857
    if (val < 0x01fffffd && val > -0x01fffffd)
858
        tcg_out_bl(s, cond, val);
859
    else {
860
#if 1
861
        tcg_abort();
862
#else
863
        if (cond == COND_AL) {
864
            tcg_out_dat_imm(s, cond, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
865
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
866
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
867
        } else {
868
            tcg_out_movi32(s, cond, TCG_REG_R9, addr);
869
            tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
870
                            TCG_REG_PC, SHIFT_IMM_LSL(0));
871
            tcg_out_bx(s, cond, TCG_REG_R9);
872
        }
873
#endif
874
    }
875
}
876

    
877
static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
878
{
879
    if (use_armv5_instructions) {
880
        tcg_out_blx(s, cond, arg);
881
    } else {
882
        tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
883
                        TCG_REG_PC, SHIFT_IMM_LSL(0));
884
        tcg_out_bx(s, cond, arg);
885
    }
886
}
887

    
888
static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
889
{
890
    TCGLabel *l = &s->labels[label_index];
891

    
892
    if (l->has_value)
893
        tcg_out_goto(s, cond, l->u.value);
894
    else if (cond == COND_AL) {
895
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
896
        tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
897
        s->code_ptr += 4;
898
    } else {
899
        /* Probably this should be preferred even for COND_AL... */
900
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
901
        tcg_out_b_noaddr(s, cond);
902
    }
903
}
904

    
905
#ifdef CONFIG_SOFTMMU
906

    
907
#include "../../softmmu_defs.h"
908

    
909
static void *qemu_ld_helpers[4] = {
910
    __ldb_mmu,
911
    __ldw_mmu,
912
    __ldl_mmu,
913
    __ldq_mmu,
914
};
915

    
916
static void *qemu_st_helpers[4] = {
917
    __stb_mmu,
918
    __stw_mmu,
919
    __stl_mmu,
920
    __stq_mmu,
921
};
922
#endif
923

    
924
#define TLB_SHIFT        (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
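
/* 1 << TLB_SHIFT is the size in bytes of one mmu_idx's row of tlb_table:
 * CPU_TLB_SIZE (== 1 << CPU_TLB_BITS) entries of 1 << CPU_TLB_ENTRY_BITS
 * bytes each.
 */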
925

    
926
static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
927
{
928
    int addr_reg, data_reg, data_reg2, bswap;
929
#ifdef CONFIG_SOFTMMU
930
    int mem_index, s_bits;
931
# if TARGET_LONG_BITS == 64
932
    int addr_reg2;
933
# endif
934
    uint32_t *label_ptr;
935
#endif
936

    
937
#ifdef TARGET_WORDS_BIGENDIAN
938
    bswap = 1;
939
#else
940
    bswap = 0;
941
#endif
942
    data_reg = *args++;
943
    if (opc == 3)
944
        data_reg2 = *args++;
945
    else
946
        data_reg2 = 0; /* suppress warning */
947
    addr_reg = *args++;
948
#ifdef CONFIG_SOFTMMU
949
# if TARGET_LONG_BITS == 64
950
    addr_reg2 = *args++;
951
# endif
952
    mem_index = *args;
953
    s_bits = opc & 3;
954

    
955
    /* Should generate something like the following:
956
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
957
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
958
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
959
     */
960
#  if CPU_TLB_BITS > 8
961
#   error
962
#  endif
963
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_R8,
964
                    0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
965
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
966
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
967
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
968
                    TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
969
    /* In the
970
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
971
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
972
     * unlikely to exceed them otherwise, so use an
973
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
974
     * before.
975
     */
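    /* The immediate below is that add pre-encoded as an ARM rotated
     * constant: imm8 = mem_index << (TLB_SHIFT & 1) with a rotate field of
     * 16 - (TLB_SHIFT >> 1), which the CPU decodes as
     * mem_index << TLB_SHIFT, i.e. mem_index * sizeof(tlb_table[0]).
     */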
976
    if (mem_index)
977
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
978
                        (mem_index << (TLB_SHIFT & 1)) |
979
                        ((16 - (TLB_SHIFT >> 1)) << 8));
980
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
981
                    offsetof(CPUState, tlb_table[0][0].addr_read));
982
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
983
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
984
    /* Check alignment.  */
985
    if (s_bits)
986
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
987
                        0, addr_reg, (1 << s_bits) - 1);
988
#  if TARGET_LONG_BITS == 64
989
    /* XXX: possibly we could use a block data load or writeback in
990
     * the first access.  */
991
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
992
                    offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
993
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
994
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
995
#  endif
996
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
997
                    offsetof(CPUState, tlb_table[0][0].addend));
998

    
999
    switch (opc) {
1000
    case 0:
1001
        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1002
        break;
1003
    case 0 | 4:
1004
        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1005
        break;
1006
    case 1:
1007
        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1008
        if (bswap) {
1009
            tcg_out_bswap16(s, COND_EQ, data_reg, data_reg);
1010
        }
1011
        break;
1012
    case 1 | 4:
1013
        if (bswap) {
1014
            tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1015
            tcg_out_bswap16s(s, COND_EQ, data_reg, data_reg);
1016
        } else {
1017
            tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1018
        }
1019
        break;
1020
    case 2:
1021
    default:
1022
        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1023
        if (bswap) {
1024
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
1025
        }
1026
        break;
1027
    case 3:
1028
        if (bswap) {
1029
            tcg_out_ld32_rwb(s, COND_EQ, data_reg2, TCG_REG_R1, addr_reg);
1030
            tcg_out_ld32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
1031
            tcg_out_bswap32(s, COND_EQ, data_reg2, data_reg2);
1032
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
1033
        } else {
1034
            tcg_out_ld32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
1035
            tcg_out_ld32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
1036
        }
1037
        break;
1038
    }
1039

    
1040
    label_ptr = (void *) s->code_ptr;
1041
    tcg_out_b(s, COND_EQ, 8);
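    /* TLB hit: the COND_EQ accesses above performed the load, and this
     * COND_EQ branch skips the slow path.  It is emitted with offset 8 so
     * that its 24-bit offset field starts out as zero; the field is fixed
     * up by the "*label_ptr += ..." below once the length of the helper
     * call sequence is known.
     */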
1042

    
1043
    /* TODO: move this code to where the constant pool will be */
1044
    if (addr_reg != TCG_REG_R0) {
1045
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1046
                        TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
1047
    }
1048
# if TARGET_LONG_BITS == 32
1049
    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R1, 0, mem_index);
1050
# else
1051
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1052
                    TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
1053
    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
1054
# endif
1055
    tcg_out_bl(s, COND_AL, (tcg_target_long) qemu_ld_helpers[s_bits] -
1056
                    (tcg_target_long) s->code_ptr);
1057

    
1058
    switch (opc) {
1059
    case 0 | 4:
1060
        tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
1061
        break;
1062
    case 1 | 4:
1063
        tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
1064
        break;
1065
    case 0:
1066
    case 1:
1067
    case 2:
1068
    default:
1069
        if (data_reg != TCG_REG_R0) {
1070
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1071
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
1072
        }
1073
        break;
1074
    case 3:
1075
        if (data_reg != TCG_REG_R0) {
1076
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1077
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
1078
        }
1079
        if (data_reg2 != TCG_REG_R1) {
1080
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1081
                            data_reg2, 0, TCG_REG_R1, SHIFT_IMM_LSL(0));
1082
        }
1083
        break;
1084
    }
1085

    
1086
    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
1087
#else /* !CONFIG_SOFTMMU */
1088
    if (GUEST_BASE) {
1089
        uint32_t offset = GUEST_BASE;
1090
        int i;
1091
        int rot;
1092

    
1093
        while (offset) {
1094
            i = ctz32(offset) & ~1;
1095
            rot = ((32 - i) << 7) & 0xf00;
1096

    
1097
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
1098
                            ((offset >> i) & 0xff) | rot);
1099
            addr_reg = TCG_REG_R8;
1100
            offset &= ~(0xff << i);
1101
        }
1102
    }
1103
    switch (opc) {
1104
    case 0:
1105
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
1106
        break;
1107
    case 0 | 4:
1108
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
1109
        break;
1110
    case 1:
1111
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
1112
        if (bswap) {
1113
            tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
1114
        }
1115
        break;
1116
    case 1 | 4:
1117
        if (bswap) {
1118
            tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
1119
            tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
1120
        } else {
1121
            tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
1122
        }
1123
        break;
1124
    case 2:
1125
    default:
1126
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
1127
        if (bswap) {
1128
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
1129
        }
1130
        break;
1131
    case 3:
1132
        /* TODO: use block load -
1133
         * check that data_reg2 > data_reg or the other way around */
1134
        if (data_reg == addr_reg) {
1135
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
1136
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
1137
        } else {
1138
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
1139
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
1140
        }
1141
        if (bswap) {
1142
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
1143
            tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
1144
        }
1145
        break;
1146
    }
1147
#endif
1148
}
1149

    
1150
static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
1151
{
1152
    int addr_reg, data_reg, data_reg2, bswap;
1153
#ifdef CONFIG_SOFTMMU
1154
    int mem_index, s_bits;
1155
# if TARGET_LONG_BITS == 64
1156
    int addr_reg2;
1157
# endif
1158
    uint32_t *label_ptr;
1159
#endif
1160

    
1161
#ifdef TARGET_WORDS_BIGENDIAN
1162
    bswap = 1;
1163
#else
1164
    bswap = 0;
1165
#endif
1166
    data_reg = *args++;
1167
    if (opc == 3)
1168
        data_reg2 = *args++;
1169
    else
1170
        data_reg2 = 0; /* suppress warning */
1171
    addr_reg = *args++;
1172
#ifdef CONFIG_SOFTMMU
1173
# if TARGET_LONG_BITS == 64
1174
    addr_reg2 = *args++;
1175
# endif
1176
    mem_index = *args;
1177
    s_bits = opc & 3;
1178

    
1179
    /* Should generate something like the following:
1180
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
1181
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
1182
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
1183
     */
1184
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1185
                    TCG_REG_R8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
1186
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
1187
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
1188
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
1189
                    TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
1190
    /* In the
1191
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
1192
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
1193
     * unlikely to exceed them otherwise, so use an
1194
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
1195
     * before.
1196
     */
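    /* As in tcg_out_qemu_ld above, the immediate below is
     * mem_index << TLB_SHIFT pre-encoded as imm8 plus a rotate field.
     */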
1197
    if (mem_index)
1198
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
1199
                        (mem_index << (TLB_SHIFT & 1)) |
1200
                        ((16 - (TLB_SHIFT >> 1)) << 8));
1201
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
1202
                    offsetof(CPUState, tlb_table[0][0].addr_write));
1203
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
1204
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
1205
    /* Check alignment.  */
1206
    if (s_bits)
1207
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
1208
                        0, addr_reg, (1 << s_bits) - 1);
1209
#  if TARGET_LONG_BITS == 64
1210
    /* XXX: possibly we could use a block data load or writeback in
1211
     * the first access.  */
1212
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
1213
                    offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
1214
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
1215
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
1216
#  endif
1217
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
1218
                    offsetof(CPUState, tlb_table[0][0].addend));
1219

    
1220
    switch (opc) {
1221
    case 0:
1222
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1223
        break;
1224
    case 1:
1225
        if (bswap) {
1226
            tcg_out_bswap16(s, COND_EQ, TCG_REG_R0, data_reg);
1227
            tcg_out_st16_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
1228
        } else {
1229
            tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1230
        }
1231
        break;
1232
    case 2:
1233
    default:
1234
        if (bswap) {
1235
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
1236
            tcg_out_st32_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
1237
        } else {
1238
            tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1239
        }
1240
        break;
1241
    case 3:
1242
        if (bswap) {
1243
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg2);
1244
            tcg_out_st32_rwb(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, addr_reg);
1245
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
1246
            tcg_out_st32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
1247
        } else {
1248
            tcg_out_st32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
1249
            tcg_out_st32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
1250
        }
1251
        break;
1252
    }
1253

    
1254
    label_ptr = (void *) s->code_ptr;
1255
    tcg_out_b(s, COND_EQ, 8);
1256

    
1257
    /* TODO: move this code to where the constants pool will be */
1258
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1259
                    TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
1260
# if TARGET_LONG_BITS == 32
1261
    switch (opc) {
1262
    case 0:
1263
        tcg_out_ext8u(s, COND_AL, TCG_REG_R1, data_reg);
1264
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
1265
        break;
1266
    case 1:
1267
        tcg_out_ext16u(s, COND_AL, TCG_REG_R1, data_reg);
1268
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
1269
        break;
1270
    case 2:
1271
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1272
                        TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
1273
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
1274
        break;
1275
    case 3:
1276
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
1277
        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
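        /* The address is in r0 and the 64-bit data occupies the r2/r3
         * pair, so mem_index is passed on the stack; the
         * "add r13, r13, #0x10" emitted after the helper call below
         * undoes this push.
         */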
1278
        if (data_reg != TCG_REG_R2) {
1279
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1280
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
1281
        }
1282
        if (data_reg2 != TCG_REG_R3) {
1283
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1284
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
1285
        }
1286
        break;
1287
    }
1288
# else
1289
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1290
                    TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
1291
    switch (opc) {
1292
    case 0:
1293
        tcg_out_ext8u(s, COND_AL, TCG_REG_R2, data_reg);
1294
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
1295
        break;
1296
    case 1:
1297
        tcg_out_ext16u(s, COND_AL, TCG_REG_R2, data_reg);
1298
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
1299
        break;
1300
    case 2:
1301
        if (data_reg != TCG_REG_R2) {
1302
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1303
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
1304
        }
1305
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
1306
        break;
1307
    case 3:
1308
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
1309
        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
1310
        if (data_reg != TCG_REG_R2) {
1311
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1312
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
1313
        }
1314
        if (data_reg2 != TCG_REG_R3) {
1315
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1316
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
1317
        }
1318
        break;
1319
    }
1320
# endif
1321

    
1322
    tcg_out_bl(s, COND_AL, (tcg_target_long) qemu_st_helpers[s_bits] -
1323
                    (tcg_target_long) s->code_ptr);
1324
    if (opc == 3)
1325
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);
1326

    
1327
    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
1328
#else /* !CONFIG_SOFTMMU */
1329
    if (GUEST_BASE) {
1330
        uint32_t offset = GUEST_BASE;
1331
        int i;
1332
        int rot;
1333

    
1334
        while (offset) {
1335
            i = ctz32(offset) & ~1;
1336
            rot = ((32 - i) << 7) & 0xf00;
1337

    
1338
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg,
1339
                            ((offset >> i) & 0xff) | rot);
1340
            addr_reg = TCG_REG_R1;
1341
            offset &= ~(0xff << i);
1342
        }
1343
    }
1344
    switch (opc) {
1345
    case 0:
1346
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
1347
        break;
1348
    case 1:
1349
        if (bswap) {
1350
            tcg_out_bswap16(s, COND_AL, TCG_REG_R0, data_reg);
1351
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0);
1352
        } else {
1353
            tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
1354
        }
1355
        break;
1356
    case 2:
1357
    default:
1358
        if (bswap) {
1359
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
1360
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
1361
        } else {
1362
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
1363
        }
1364
        break;
1365
    case 3:
1366
        /* TODO: use block store -
1367
         * check that data_reg2 > data_reg or the other way around */
1368
        if (bswap) {
1369
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
1370
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
1371
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
1372
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4);
1373
        } else {
1374
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
1375
            tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
1376
        }
1377
        break;
1378
    }
1379
#endif
1380
}
1381

    
1382
static uint8_t *tb_ret_addr;
1383

    
1384
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1385
                const TCGArg *args, const int *const_args)
1386
{
1387
    int c;
1388

    
1389
    switch (opc) {
1390
    case INDEX_op_exit_tb:
1391
        {
1392
            uint8_t *ld_ptr = s->code_ptr;
1393
            if (args[0] >> 8)
1394
                tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
1395
            else
1396
                tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
1397
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
1398
            if (args[0] >> 8) {
1399
                *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
1400
                tcg_out32(s, args[0]);
1401
            }
1402
        }
1403
        break;
1404
    case INDEX_op_goto_tb:
1405
        if (s->tb_jmp_offset) {
1406
            /* Direct jump method */
1407
#if defined(USE_DIRECT_JUMP)
1408
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1409
            tcg_out_b(s, COND_AL, 8);
1410
#else
1411
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
1412
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1413
            tcg_out32(s, 0);
1414
#endif
1415
        } else {
1416
            /* Indirect jump method */
1417
#if 1
1418
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
1419
            if (c > 0xfff || c < -0xfff) {
1420
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
1421
                                (tcg_target_long) (s->tb_next + args[0]));
1422
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
1423
            } else
1424
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
1425
#else
1426
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
1427
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
1428
            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
1429
#endif
1430
        }
1431
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1432
        break;
1433
    case INDEX_op_call:
1434
        if (const_args[0])
1435
            tcg_out_call(s, COND_AL, args[0]);
1436
        else
1437
            tcg_out_callr(s, COND_AL, args[0]);
1438
        break;
1439
    case INDEX_op_jmp:
1440
        if (const_args[0])
1441
            tcg_out_goto(s, COND_AL, args[0]);
1442
        else
1443
            tcg_out_bx(s, COND_AL, args[0]);
1444
        break;
1445
    case INDEX_op_br:
1446
        tcg_out_goto_label(s, COND_AL, args[0]);
1447
        break;
1448

    
1449
    case INDEX_op_ld8u_i32:
1450
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
1451
        break;
1452
    case INDEX_op_ld8s_i32:
1453
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
1454
        break;
1455
    case INDEX_op_ld16u_i32:
1456
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
1457
        break;
1458
    case INDEX_op_ld16s_i32:
1459
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
1460
        break;
1461
    case INDEX_op_ld_i32:
1462
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
1463
        break;
1464
    case INDEX_op_st8_i32:
1465
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
1466
        break;
1467
    case INDEX_op_st16_i32:
1468
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
1469
        break;
1470
    case INDEX_op_st_i32:
1471
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
1472
        break;
1473

    
1474
    case INDEX_op_mov_i32:
1475
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1476
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
1477
        break;
1478
    case INDEX_op_movi_i32:
1479
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
1480
        break;
1481
    case INDEX_op_add_i32:
1482
        c = ARITH_ADD;
1483
        goto gen_arith;
1484
    case INDEX_op_sub_i32:
1485
        c = ARITH_SUB;
1486
        goto gen_arith;
1487
    case INDEX_op_and_i32:
1488
        c = ARITH_AND;
1489
        goto gen_arith;
1490
    case INDEX_op_andc_i32:
1491
        c = ARITH_BIC;
1492
        goto gen_arith;
1493
    case INDEX_op_or_i32:
1494
        c = ARITH_ORR;
1495
        goto gen_arith;
1496
    case INDEX_op_xor_i32:
1497
        c = ARITH_EOR;
1498
        /* Fall through.  */
1499
    gen_arith:
1500
        if (const_args[2]) {
1501
            int rot;
1502
            rot = encode_imm(args[2]);
1503
            tcg_out_dat_imm(s, COND_AL, c,
1504
                            args[0], args[1], rotl(args[2], rot) | (rot << 7));
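            /* encode_imm() returned the left rotation that brings the
             * constant into 8 bits; "rot << 7" places rot / 2 in the
             * instruction's 4-bit rotate field.
             */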
1505
        } else
1506
            tcg_out_dat_reg(s, COND_AL, c,
1507
                            args[0], args[1], args[2], SHIFT_IMM_LSL(0));
1508
        break;
1509
    case INDEX_op_add2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_sub2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
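    /* In the immediate forms below, a shift amount of 0 does not mean "no
       shift" for LSR/ASR/ROR (it encodes shift-by-32 or RRX), so a zero
       count is emitted as LSL #0, i.e. a plain move.  */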
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through.  */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

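    /* Rotate left is emitted as a rotate right by (32 - count); for a
       register count, the reserved scratch R8 holds 32 - args[2].  */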
    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_R8, args[2], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_R8));
        }
        break;

    case INDEX_op_brcond_i32:
        if (const_args[1]) {
            int rot;
            rot = encode_imm(args[1]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[0], rotl(args[1], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[0], args[1], SHIFT_IMM_LSL(0));
        }
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) ||  a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) ||  a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) ||  a1 >  a3,
         */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[0], args[2], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
        break;
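    /* setcond: compare, then write 1 under the requested condition and 0
       under the inverted one.  */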
    case INDEX_op_setcond_i32:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[1], rotl(args[2], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[1], args[2], SHIFT_IMM_LSL(0));
        }
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[2], args[4], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
        break;

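    /* Guest memory accesses: the second argument of tcg_out_qemu_ld/st is
       the log2 of the access size (0..3); for loads, bit 2 additionally
       requests sign extension.  */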
    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    default:
        tcg_abort();
    }
}

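/* Operand constraints: "r" is any general register and "I" a constant that
   encode_imm() can represent as a rotated 8-bit ARM immediate.  The "l"/"L"
   and "s"/"S" letters on the qemu_ld/st ops further restrict which registers
   may be chosen, so that the slow path's helper-call arguments are not
   clobbered; see target_parse_constraint() for the exact register sets.  */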
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rI" } },
    { INDEX_op_sub_i32, { "r", "r", "rI" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rI" } },
    { INDEX_op_andc_i32, { "r", "r", "rI" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rI" } },
    { INDEX_op_setcond_i32, { "r", "r", "rI" } },

    /* TODO: "r", "r", "r", "r", "ri", "ri" */
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "r", "r" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l" } },

    { INDEX_op_qemu_st8, { "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l", "l" } },

    { INDEX_op_qemu_st8, { "s", "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s", "s" } },
#endif

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { -1 },
};

void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
                     (1 << TCG_REG_R1) |
                     (1 << TCG_REG_R2) |
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R12) |
                     (1 << TCG_REG_R14));

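    /* The stack pointer, the R8 scratch register (used e.g. by the
       rotl_i32 case above) and the PC must stay out of the allocator.  */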
    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);

    tcg_add_target_add_op_defs(arm_op_defs);
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}

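/* Adjust a register by a small constant.  Only offsets with a magnitude
   below 0x100 are handled; anything larger aborts.  */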
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val > 0) {
        if (val < 0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
        else
            tcg_abort();
    } else if (val < 0) {
        if (val > -0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
        else
            tcg_abort();
    }
}

static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                int ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}

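/* The prologue saves the callee-saved registers plus lr on the stack and
   then branches to the code buffer address passed in r0.  tb_ret_addr marks
   the start of the epilogue, which restores those registers and returns by
   loading the saved lr value straight into pc.  */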
void tcg_target_qemu_prologue(TCGContext *s)
{
    /* There is no need to save r7; it is used to store the address
       of the env structure and is not modified by GCC.  */

    /* stmdb sp!, { r4 - r6, r8 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4f70);

    tcg_out_bx(s, COND_AL, TCG_REG_R0);
    tb_ret_addr = s->code_ptr;

    /* ldmia sp!, { r4 - r6, r8 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8f70);
}