/*
2
 * Tiny Code Generator for QEMU
3
 *
4
 * Copyright (c) 2008 Andrzej Zaborowski
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7
 * of this software and associated documentation files (the "Software"), to deal
8
 * in the Software without restriction, including without limitation the rights
9
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
 * copies of the Software, and to permit persons to whom the Software is
11
 * furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22
 * THE SOFTWARE.
23
 */
24

    
25
#if defined(__ARM_ARCH_7__) ||  \
26
    defined(__ARM_ARCH_7A__) || \
27
    defined(__ARM_ARCH_7EM__) || \
28
    defined(__ARM_ARCH_7M__) || \
29
    defined(__ARM_ARCH_7R__)
30
#define USE_ARMV7_INSTRUCTIONS
31
#endif
32

    
33
#if defined(USE_ARMV7_INSTRUCTIONS) || \
34
    defined(__ARM_ARCH_6J__) || \
35
    defined(__ARM_ARCH_6K__) || \
36
    defined(__ARM_ARCH_6T2__) || \
37
    defined(__ARM_ARCH_6Z__) || \
38
    defined(__ARM_ARCH_6ZK__)
39
#define USE_ARMV6_INSTRUCTIONS
40
#endif
41

    
42
#if defined(USE_ARMV6_INSTRUCTIONS) || \
43
    defined(__ARM_ARCH_5T__) || \
44
    defined(__ARM_ARCH_5TE__) || \
45
    defined(__ARM_ARCH_5TEJ__)
46
#define USE_ARMV5_INSTRUCTIONS
47
#endif
48

    
49
#ifdef USE_ARMV5_INSTRUCTIONS
50
static const int use_armv5_instructions = 1;
51
#else
52
static const int use_armv5_instructions = 0;
53
#endif
54
#undef USE_ARMV5_INSTRUCTIONS
55

    
56
#ifdef USE_ARMV6_INSTRUCTIONS
57
static const int use_armv6_instructions = 1;
58
#else
59
static const int use_armv6_instructions = 0;
60
#endif
61
#undef USE_ARMV6_INSTRUCTIONS
62

    
63
#ifdef USE_ARMV7_INSTRUCTIONS
64
static const int use_armv7_instructions = 1;
65
#else
66
static const int use_armv7_instructions = 0;
67
#endif
68
#undef USE_ARMV7_INSTRUCTIONS
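
/* The use_armv5/6/7_instructions constants defined above fold the
 * preprocessor feature probes into ordinary integers, so later code can use
 * plain "if (use_armv6_instructions)" tests; the compiler still removes the
 * dead branch, but both alternatives keep compiling on every host.
 */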
69

    
70
#ifndef NDEBUG
71
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
72
    "%r0",
73
    "%r1",
74
    "%r2",
75
    "%r3",
76
    "%r4",
77
    "%r5",
78
    "%r6",
79
    "%r7",
80
    "%r8",
81
    "%r9",
82
    "%r10",
83
    "%r11",
84
    "%r12",
85
    "%r13",
86
    "%r14",
87
    "%pc",
88
};
89
#endif
90

    
91
static const int tcg_target_reg_alloc_order[] = {
92
    TCG_REG_R4,
93
    TCG_REG_R5,
94
    TCG_REG_R6,
95
    TCG_REG_R7,
96
    TCG_REG_R8,
97
    TCG_REG_R9,
98
    TCG_REG_R10,
99
    TCG_REG_R11,
100
    TCG_REG_R13,
101
    TCG_REG_R0,
102
    TCG_REG_R1,
103
    TCG_REG_R2,
104
    TCG_REG_R3,
105
    TCG_REG_R12,
106
    TCG_REG_R14,
107
};
108

    
109
static const int tcg_target_call_iarg_regs[4] = {
110
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
111
};
112
static const int tcg_target_call_oarg_regs[2] = {
113
    TCG_REG_R0, TCG_REG_R1
114
};
115

    
116
static void patch_reloc(uint8_t *code_ptr, int type,
117
                tcg_target_long value, tcg_target_long addend)
118
{
119
    switch (type) {
120
    case R_ARM_ABS32:
121
        *(uint32_t *) code_ptr = value;
122
        break;
123

    
124
    case R_ARM_CALL:
125
    case R_ARM_JUMP24:
126
    default:
127
        tcg_abort();
128

    
129
    case R_ARM_PC24:
130
        *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & 0xff000000) |
131
                (((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff);
132
        break;
133
    }
134
}
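
/* For R_ARM_PC24 the displacement is taken relative to the branch address
 * plus 8 (ARM pipeline prefetch), counted in words and truncated to the low
 * 24 bits of the instruction.  For example, patching a branch at 0x1000 to
 * reach 0x1010 stores ((0x1010 - 0x1008) >> 2) & 0xffffff = 2.
 */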
135

    
136
/* maximum number of registers used for input function arguments */
137
static inline int tcg_target_get_call_iarg_regs_count(int flags)
138
{
139
    return 4;
140
}
141

    
142
/* parse target specific constraints */
143
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
144
{
145
    const char *ct_str;
146

    
147
    ct_str = *pct_str;
148
    switch (ct_str[0]) {
149
    case 'I':
150
         ct->ct |= TCG_CT_CONST_ARM;
151
         break;
152

    
153
    case 'r':
154
        ct->ct |= TCG_CT_REG;
155
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
156
        break;
157

    
158
    /* qemu_ld address */
159
    case 'l':
160
        ct->ct |= TCG_CT_REG;
161
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
162
#ifdef CONFIG_SOFTMMU
163
        /* r0 and r1 will be overwritten when reading the tlb entry,
164
           so don't use these. */
165
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
166
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
167
#endif
168
        break;
169
    case 'L':
170
        ct->ct |= TCG_CT_REG;
171
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
172
#ifdef CONFIG_SOFTMMU
173
        /* r1 is still needed to load data_reg or data_reg2,
174
           so don't use it. */
175
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
176
#endif
177
        break;
178

    
179
    /* qemu_st address & data_reg */
180
    case 's':
181
        ct->ct |= TCG_CT_REG;
182
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
183
        /* r0 and r1 will be overwritten when reading the tlb entry
184
           (softmmu only) and doing the byte swapping, so don't
185
           use these. */
186
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
187
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
188
        break;
189
    /* qemu_st64 data_reg2 */
190
    case 'S':
191
        ct->ct |= TCG_CT_REG;
192
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
193
        /* r0 and r1 will be overwritten when reading the tlb entry
194
            (softmmu only) and doing the byte swapping, so don't
195
            use these. */
196
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
197
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
198
#ifdef CONFIG_SOFTMMU
199
        /* r2 is still needed to load data_reg, so don't use it. */
200
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
201
#endif
202
        break;
203

    
204
    default:
205
        return -1;
206
    }
207
    ct_str++;
208
    *pct_str = ct_str;
209

    
210
    return 0;
211
}
212

    
213
static inline uint32_t rotl(uint32_t val, int n)
214
{
215
  return (val << n) | (val >> (32 - n));
216
}
217

    
218
/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   value right-rotated by an even amount between 0 and 30. */
220
static inline int encode_imm(uint32_t imm)
221
{
222
    int shift;
223

    
224
    /* simple case, only lower bits */
225
    if ((imm & ~0xff) == 0)
226
        return 0;
227
    /* then try a simple even shift */
228
    shift = ctz32(imm) & ~1;
229
    if (((imm >> shift) & ~0xff) == 0)
230
        return 32 - shift;
231
    /* now try harder with rotations */
232
    if ((rotl(imm, 2) & ~0xff) == 0)
233
        return 2;
234
    if ((rotl(imm, 4) & ~0xff) == 0)
235
        return 4;
236
    if ((rotl(imm, 6) & ~0xff) == 0)
237
        return 6;
238
    /* imm can't be encoded */
239
    return -1;
240
}
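
/* Example: encode_imm(0x3f0) picks shift = 4, sees that 0x3f0 >> 4 == 0x3f
 * fits in 8 bits, and returns 28.  Callers then emit
 * rotl(0x3f0, 28) | (28 << 7), i.e. imm8 = 0x3f with rotate field 14, which
 * the CPU rotates right by 28 bits to recover 0x3f0.
 */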
241

    
242
static inline int check_fit_imm(uint32_t imm)
243
{
244
    return encode_imm(imm) >= 0;
245
}
246

    
247
/* Test if a constant matches the constraint.
248
 * TODO: define constraints for:
249
 *
250
 * ldr/str offset:   between -0xfff and 0xfff
251
 * ldrh/strh offset: between -0xff and 0xff
252
 * mov operand2:     values represented with x << (2 * y), x < 0x100
253
 * add, sub, eor...: ditto
254
 */
255
static inline int tcg_target_const_match(tcg_target_long val,
256
                const TCGArgConstraint *arg_ct)
257
{
258
    int ct;
259
    ct = arg_ct->ct;
260
    if (ct & TCG_CT_CONST)
261
        return 1;
262
    else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
263
        return 1;
264
    else
265
        return 0;
266
}
267

    
268
enum arm_data_opc_e {
269
    ARITH_AND = 0x0,
270
    ARITH_EOR = 0x1,
271
    ARITH_SUB = 0x2,
272
    ARITH_RSB = 0x3,
273
    ARITH_ADD = 0x4,
274
    ARITH_ADC = 0x5,
275
    ARITH_SBC = 0x6,
276
    ARITH_RSC = 0x7,
277
    ARITH_TST = 0x8,
278
    ARITH_CMP = 0xa,
279
    ARITH_CMN = 0xb,
280
    ARITH_ORR = 0xc,
281
    ARITH_MOV = 0xd,
282
    ARITH_BIC = 0xe,
283
    ARITH_MVN = 0xf,
284
};
285

    
286
#define TO_CPSR(opc) \
287
  ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)
288

    
289
#define SHIFT_IMM_LSL(im)        (((im) << 7) | 0x00)
290
#define SHIFT_IMM_LSR(im)        (((im) << 7) | 0x20)
291
#define SHIFT_IMM_ASR(im)        (((im) << 7) | 0x40)
292
#define SHIFT_IMM_ROR(im)        (((im) << 7) | 0x60)
293
#define SHIFT_REG_LSL(rs)        (((rs) << 8) | 0x10)
294
#define SHIFT_REG_LSR(rs)        (((rs) << 8) | 0x30)
295
#define SHIFT_REG_ASR(rs)        (((rs) << 8) | 0x50)
296
#define SHIFT_REG_ROR(rs)        (((rs) << 8) | 0x70)
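
/* These macros build the "operand 2" shifter field of a data-processing
 * instruction: for an immediate shift the 5-bit amount sits in bits [11:7]
 * with the shift type in bits [6:5]; for a register-specified shift bit 4 is
 * set and the shift-amount register goes in bits [11:8].
 */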
297

    
298
enum arm_cond_code_e {
299
    COND_EQ = 0x0,
300
    COND_NE = 0x1,
301
    COND_CS = 0x2,        /* Unsigned greater or equal */
302
    COND_CC = 0x3,        /* Unsigned less than */
303
    COND_MI = 0x4,        /* Negative */
304
    COND_PL = 0x5,        /* Zero or greater */
305
    COND_VS = 0x6,        /* Overflow */
306
    COND_VC = 0x7,        /* No overflow */
307
    COND_HI = 0x8,        /* Unsigned greater than */
308
    COND_LS = 0x9,        /* Unsigned less or equal */
309
    COND_GE = 0xa,
310
    COND_LT = 0xb,
311
    COND_GT = 0xc,
312
    COND_LE = 0xd,
313
    COND_AL = 0xe,
314
};
315

    
316
static const uint8_t tcg_cond_to_arm_cond[10] = {
317
    [TCG_COND_EQ] = COND_EQ,
318
    [TCG_COND_NE] = COND_NE,
319
    [TCG_COND_LT] = COND_LT,
320
    [TCG_COND_GE] = COND_GE,
321
    [TCG_COND_LE] = COND_LE,
322
    [TCG_COND_GT] = COND_GT,
323
    /* unsigned */
324
    [TCG_COND_LTU] = COND_CC,
325
    [TCG_COND_GEU] = COND_CS,
326
    [TCG_COND_LEU] = COND_LS,
327
    [TCG_COND_GTU] = COND_HI,
328
};
329

    
330
static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
331
{
332
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
333
}
334

    
335
static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
336
{
337
    tcg_out32(s, (cond << 28) | 0x0a000000 |
338
                    (((offset - 8) >> 2) & 0x00ffffff));
339
}
340

    
341
static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
342
{
343
#ifdef HOST_WORDS_BIGENDIAN
344
    tcg_out8(s, (cond << 4) | 0x0a);
345
    s->code_ptr += 3;
346
#else
347
    s->code_ptr += 3;
348
    tcg_out8(s, (cond << 4) | 0x0a);
349
#endif
350
}
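
/* tcg_out_b_noaddr emits only the condition/opcode byte of a B instruction
 * and skips the three offset bytes; the 24-bit displacement is filled in
 * later by patch_reloc() via R_ARM_PC24 once the label is resolved.  The
 * ifdef selects which end of the word that byte occupies on big- versus
 * little-endian hosts.
 */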
351

    
352
static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
353
{
354
    tcg_out32(s, (cond << 28) | 0x0b000000 |
355
                    (((offset - 8) >> 2) & 0x00ffffff));
356
}
357

    
358
static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
359
{
360
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
361
}
362

    
363
static inline void tcg_out_dat_reg(TCGContext *s,
364
                int cond, int opc, int rd, int rn, int rm, int shift)
365
{
366
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
367
                    (rn << 16) | (rd << 12) | shift | rm);
368
}
369

    
370
static inline void tcg_out_dat_reg2(TCGContext *s,
371
                int cond, int opc0, int opc1, int rd0, int rd1,
372
                int rn0, int rn1, int rm0, int rm1, int shift)
373
{
374
    if (rd0 == rn1 || rd0 == rm1) {
375
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
376
                        (rn0 << 16) | (8 << 12) | shift | rm0);
377
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
378
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
379
        tcg_out_dat_reg(s, cond, ARITH_MOV,
380
                        rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
381
    } else {
382
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
383
                        (rn0 << 16) | (rd0 << 12) | shift | rm0);
384
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
385
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
386
    }
387
}
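
/* tcg_out_dat_reg2 emits a 64-bit operation as two 32-bit ALU instructions,
 * the first (opc0) with the S bit (1 << 20) set so the carry feeds the
 * second, e.g. ADDS/ADC for add2 and SUBS/SBC for sub2.  If the low-half
 * destination aliases a high-half input, the low result is built in R8 and
 * moved into place afterwards.
 */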
388

    
389
static inline void tcg_out_dat_imm(TCGContext *s,
390
                int cond, int opc, int rd, int rn, int im)
391
{
392
    tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
393
                    (rn << 16) | (rd << 12) | im);
394
}
395

    
396
static inline void tcg_out_movi32(TCGContext *s,
397
                int cond, int rd, int32_t arg)
398
{
399
    /* TODO: This is very suboptimal; we could easily have a constant
     * pool somewhere after all the instructions.  */
401

    
402
    if (arg < 0 && arg > -0x100)
403
        return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);
404

    
405
    if (use_armv7_instructions) {
406
        /* use movw/movt */
407
        /* movw */
408
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
409
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
410
        if (arg & 0xffff0000)
411
            /* movt */
412
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
413
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
414
    } else {
415
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff);
416
        if (arg & 0x0000ff00)
417
            tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
418
                            ((arg >>  8) & 0xff) | 0xc00);
419
        if (arg & 0x00ff0000)
420
            tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
421
                            ((arg >> 16) & 0xff) | 0x800);
422
        if (arg & 0xff000000)
423
            tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
424
                            ((arg >> 24) & 0xff) | 0x400);
425
        }
426
}
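
/* tcg_out_movi32 uses the ARMv7 movw/movt pair when available, and otherwise
 * a mov of the low byte followed by up to three orr instructions.  The 0xc00,
 * 0x800 and 0x400 constants are rotate fields (12, 8 and 4) that place the
 * 8-bit immediate at bit positions 8, 16 and 24 respectively.
 */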
427

    
428
static inline void tcg_out_mul32(TCGContext *s,
429
                int cond, int rd, int rs, int rm)
430
{
431
    if (rd != rm)
432
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
433
                        (rs << 8) | 0x90 | rm);
434
    else if (rd != rs)
435
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
436
                        (rm << 8) | 0x90 | rs);
437
    else {
438
        tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
439
                        (rs << 8) | 0x90 | rm);
440
        tcg_out_dat_reg(s, cond, ARITH_MOV,
441
                        rd, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
442
    }
443
}
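
/* MUL may not have Rd equal to Rm (unpredictable on pre-ARMv6 cores), so the
 * operands are swapped when that helps; if rd aliases both inputs the product
 * is formed in R8 and then moved into rd.
 */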
444

    
445
static inline void tcg_out_umull32(TCGContext *s,
446
                int cond, int rd0, int rd1, int rs, int rm)
447
{
448
    if (rd0 != rm && rd1 != rm)
449
        tcg_out32(s, (cond << 28) | 0x800090 |
450
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
451
    else if (rd0 != rs && rd1 != rs)
452
        tcg_out32(s, (cond << 28) | 0x800090 |
453
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
454
    else {
455
        tcg_out_dat_reg(s, cond, ARITH_MOV,
456
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
457
        tcg_out32(s, (cond << 28) | 0x800098 |
458
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
459
    }
460
}
461

    
462
static inline void tcg_out_smull32(TCGContext *s,
463
                int cond, int rd0, int rd1, int rs, int rm)
464
{
465
    if (rd0 != rm && rd1 != rm)
466
        tcg_out32(s, (cond << 28) | 0xc00090 |
467
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
468
    else if (rd0 != rs && rd1 != rs)
469
        tcg_out32(s, (cond << 28) | 0xc00090 |
470
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
471
    else {
472
        tcg_out_dat_reg(s, cond, ARITH_MOV,
473
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
474
        tcg_out32(s, (cond << 28) | 0xc00098 |
475
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
476
    }
477
}
478

    
479
static inline void tcg_out_ext8s(TCGContext *s, int cond,
480
                                 int rd, int rn)
481
{
482
    if (use_armv6_instructions) {
483
        /* sxtb */
484
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
485
    } else {
486
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
487
                        rd, 0, rn, SHIFT_IMM_LSL(24));
488
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
489
                        rd, 0, rd, SHIFT_IMM_ASR(24));
490
    }
491
}
492

    
493
static inline void tcg_out_ext8u(TCGContext *s, int cond,
494
                                 int rd, int rn)
495
{
496
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
497
}
498

    
499
static inline void tcg_out_ext16s(TCGContext *s, int cond,
500
                                  int rd, int rn)
501
{
502
    if (use_armv6_instructions) {
503
        /* sxth */
504
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
505
    } else {
506
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
507
                        rd, 0, rn, SHIFT_IMM_LSL(16));
508
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
509
                        rd, 0, rd, SHIFT_IMM_ASR(16));
510
    }
511
}
512

    
513
static inline void tcg_out_ext16u(TCGContext *s, int cond,
514
                                  int rd, int rn)
515
{
516
    if (use_armv6_instructions) {
517
        /* uxth */
518
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
519
    } else {
520
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
521
                        rd, 0, rn, SHIFT_IMM_LSL(16));
522
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
523
                        rd, 0, rd, SHIFT_IMM_LSR(16));
524
    }
525
}
526

    
527
static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
528
{
529
    if (use_armv6_instructions) {
530
        /* revsh */
531
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
532
    } else {
533
        tcg_out_dat_reg(s, cond, ARITH_MOV,
534
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
535
        tcg_out_dat_reg(s, cond, ARITH_MOV,
536
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_ASR(16));
537
        tcg_out_dat_reg(s, cond, ARITH_ORR,
538
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
539
    }
540
}
541

    
542
static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
543
{
544
    if (use_armv6_instructions) {
545
        /* rev16 */
546
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
547
    } else {
548
        tcg_out_dat_reg(s, cond, ARITH_MOV,
549
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
550
        tcg_out_dat_reg(s, cond, ARITH_MOV,
551
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_LSR(16));
552
        tcg_out_dat_reg(s, cond, ARITH_ORR,
553
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
554
    }
555
}
556

    
557
static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
558
{
559
    if (use_armv6_instructions) {
560
        /* rev */
561
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
562
    } else {
563
        tcg_out_dat_reg(s, cond, ARITH_EOR,
564
                        TCG_REG_R8, rn, rn, SHIFT_IMM_ROR(16));
565
        tcg_out_dat_imm(s, cond, ARITH_BIC,
566
                        TCG_REG_R8, TCG_REG_R8, 0xff | 0x800);
567
        tcg_out_dat_reg(s, cond, ARITH_MOV,
568
                        rd, 0, rn, SHIFT_IMM_ROR(8));
569
        tcg_out_dat_reg(s, cond, ARITH_EOR,
570
                        rd, rd, TCG_REG_R8, SHIFT_IMM_LSR(8));
571
    }
572
}
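
/* Without the ARMv6 "rev" instruction, the 32-bit swap falls back to the
 * classic four-instruction sequence: eor r8, rn, rn, ror #16;
 * bic r8, r8, #0x00ff0000; mov rd, rn, ror #8; eor rd, rd, r8, lsr #8.
 */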
573

    
574
static inline void tcg_out_ld32_12(TCGContext *s, int cond,
575
                int rd, int rn, tcg_target_long im)
576
{
577
    if (im >= 0)
578
        tcg_out32(s, (cond << 28) | 0x05900000 |
579
                        (rn << 16) | (rd << 12) | (im & 0xfff));
580
    else
581
        tcg_out32(s, (cond << 28) | 0x05100000 |
582
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
583
}
584

    
585
static inline void tcg_out_st32_12(TCGContext *s, int cond,
586
                int rd, int rn, tcg_target_long im)
587
{
588
    if (im >= 0)
589
        tcg_out32(s, (cond << 28) | 0x05800000 |
590
                        (rn << 16) | (rd << 12) | (im & 0xfff));
591
    else
592
        tcg_out32(s, (cond << 28) | 0x05000000 |
593
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
594
}
595

    
596
static inline void tcg_out_ld32_r(TCGContext *s, int cond,
597
                int rd, int rn, int rm)
598
{
599
    tcg_out32(s, (cond << 28) | 0x07900000 |
600
                    (rn << 16) | (rd << 12) | rm);
601
}
602

    
603
static inline void tcg_out_st32_r(TCGContext *s, int cond,
604
                int rd, int rn, int rm)
605
{
606
    tcg_out32(s, (cond << 28) | 0x07800000 |
607
                    (rn << 16) | (rd << 12) | rm);
608
}
609

    
610
/* Register pre-increment with base writeback.  */
611
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
612
                int rd, int rn, int rm)
613
{
614
    tcg_out32(s, (cond << 28) | 0x07b00000 |
615
                    (rn << 16) | (rd << 12) | rm);
616
}
617

    
618
static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
619
                int rd, int rn, int rm)
620
{
621
    tcg_out32(s, (cond << 28) | 0x07a00000 |
622
                    (rn << 16) | (rd << 12) | rm);
623
}
624

    
625
static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
626
                int rd, int rn, tcg_target_long im)
627
{
628
    if (im >= 0)
629
        tcg_out32(s, (cond << 28) | 0x01d000b0 |
630
                        (rn << 16) | (rd << 12) |
631
                        ((im & 0xf0) << 4) | (im & 0xf));
632
    else
633
        tcg_out32(s, (cond << 28) | 0x015000b0 |
634
                        (rn << 16) | (rd << 12) |
635
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
636
}
637

    
638
static inline void tcg_out_st16_8(TCGContext *s, int cond,
639
                int rd, int rn, tcg_target_long im)
640
{
641
    if (im >= 0)
642
        tcg_out32(s, (cond << 28) | 0x01c000b0 |
643
                        (rn << 16) | (rd << 12) |
644
                        ((im & 0xf0) << 4) | (im & 0xf));
645
    else
646
        tcg_out32(s, (cond << 28) | 0x014000b0 |
647
                        (rn << 16) | (rd << 12) |
648
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
649
}
650

    
651
static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
652
                int rd, int rn, int rm)
653
{
654
    tcg_out32(s, (cond << 28) | 0x019000b0 |
655
                    (rn << 16) | (rd << 12) | rm);
656
}
657

    
658
static inline void tcg_out_st16_r(TCGContext *s, int cond,
659
                int rd, int rn, int rm)
660
{
661
    tcg_out32(s, (cond << 28) | 0x018000b0 |
662
                    (rn << 16) | (rd << 12) | rm);
663
}
664

    
665
static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
666
                int rd, int rn, tcg_target_long im)
667
{
668
    if (im >= 0)
669
        tcg_out32(s, (cond << 28) | 0x01d000f0 |
670
                        (rn << 16) | (rd << 12) |
671
                        ((im & 0xf0) << 4) | (im & 0xf));
672
    else
673
        tcg_out32(s, (cond << 28) | 0x015000f0 |
674
                        (rn << 16) | (rd << 12) |
675
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
676
}
677

    
678
static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
679
                int rd, int rn, int rm)
680
{
681
    tcg_out32(s, (cond << 28) | 0x019000f0 |
682
                    (rn << 16) | (rd << 12) | rm);
683
}
684

    
685
static inline void tcg_out_ld8_12(TCGContext *s, int cond,
686
                int rd, int rn, tcg_target_long im)
687
{
688
    if (im >= 0)
689
        tcg_out32(s, (cond << 28) | 0x05d00000 |
690
                        (rn << 16) | (rd << 12) | (im & 0xfff));
691
    else
692
        tcg_out32(s, (cond << 28) | 0x05500000 |
693
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
694
}
695

    
696
static inline void tcg_out_st8_12(TCGContext *s, int cond,
697
                int rd, int rn, tcg_target_long im)
698
{
699
    if (im >= 0)
700
        tcg_out32(s, (cond << 28) | 0x05c00000 |
701
                        (rn << 16) | (rd << 12) | (im & 0xfff));
702
    else
703
        tcg_out32(s, (cond << 28) | 0x05400000 |
704
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
705
}
706

    
707
static inline void tcg_out_ld8_r(TCGContext *s, int cond,
708
                int rd, int rn, int rm)
709
{
710
    tcg_out32(s, (cond << 28) | 0x07d00000 |
711
                    (rn << 16) | (rd << 12) | rm);
712
}
713

    
714
static inline void tcg_out_st8_r(TCGContext *s, int cond,
715
                int rd, int rn, int rm)
716
{
717
    tcg_out32(s, (cond << 28) | 0x07c00000 |
718
                    (rn << 16) | (rd << 12) | rm);
719
}
720

    
721
static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
722
                int rd, int rn, tcg_target_long im)
723
{
724
    if (im >= 0)
725
        tcg_out32(s, (cond << 28) | 0x01d000d0 |
726
                        (rn << 16) | (rd << 12) |
727
                        ((im & 0xf0) << 4) | (im & 0xf));
728
    else
729
        tcg_out32(s, (cond << 28) | 0x015000d0 |
730
                        (rn << 16) | (rd << 12) |
731
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
732
}
733

    
734
static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
735
                int rd, int rn, int rm)
736
{
737
    tcg_out32(s, (cond << 28) | 0x019000d0 |
738
                    (rn << 16) | (rd << 12) | rm);
739
}
740

    
741
static inline void tcg_out_ld32u(TCGContext *s, int cond,
742
                int rd, int rn, int32_t offset)
743
{
744
    if (offset > 0xfff || offset < -0xfff) {
745
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
746
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
747
    } else
748
        tcg_out_ld32_12(s, cond, rd, rn, offset);
749
}
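
/* tcg_out_ld32u above and the similar wrappers below dispatch between the
 * immediate and register-offset addressing forms: word and byte accesses
 * accept a signed 12-bit offset, while the halfword and signed-byte forms
 * only take 8 bits, so larger offsets are first built in the R8 scratch
 * register.
 */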
750

    
751
static inline void tcg_out_st32(TCGContext *s, int cond,
752
                int rd, int rn, int32_t offset)
753
{
754
    if (offset > 0xfff || offset < -0xfff) {
755
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
756
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
757
    } else
758
        tcg_out_st32_12(s, cond, rd, rn, offset);
759
}
760

    
761
static inline void tcg_out_ld16u(TCGContext *s, int cond,
762
                int rd, int rn, int32_t offset)
763
{
764
    if (offset > 0xff || offset < -0xff) {
765
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
766
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
767
    } else
768
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
769
}
770

    
771
static inline void tcg_out_ld16s(TCGContext *s, int cond,
772
                int rd, int rn, int32_t offset)
773
{
774
    if (offset > 0xff || offset < -0xff) {
775
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
776
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
777
    } else
778
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
779
}
780

    
781
static inline void tcg_out_st16(TCGContext *s, int cond,
782
                int rd, int rn, int32_t offset)
783
{
784
    if (offset > 0xff || offset < -0xff) {
785
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
786
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_R8);
787
    } else
788
        tcg_out_st16_8(s, cond, rd, rn, offset);
789
}
790

    
791
static inline void tcg_out_ld8u(TCGContext *s, int cond,
792
                int rd, int rn, int32_t offset)
793
{
794
    if (offset > 0xfff || offset < -0xfff) {
795
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
796
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
797
    } else
798
        tcg_out_ld8_12(s, cond, rd, rn, offset);
799
}
800

    
801
static inline void tcg_out_ld8s(TCGContext *s, int cond,
802
                int rd, int rn, int32_t offset)
803
{
804
    if (offset > 0xff || offset < -0xff) {
805
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
806
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
807
    } else
808
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
809
}
810

    
811
static inline void tcg_out_st8(TCGContext *s, int cond,
812
                int rd, int rn, int32_t offset)
813
{
814
    if (offset > 0xfff || offset < -0xfff) {
815
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
816
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
817
    } else
818
        tcg_out_st8_12(s, cond, rd, rn, offset);
819
}
820

    
821
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
822
{
823
    int32_t val;
824

    
825
    val = addr - (tcg_target_long) s->code_ptr;
826
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
827
        tcg_out_b(s, cond, val);
828
    else {
829
#if 1
830
        tcg_abort();
831
#else
832
        if (cond == COND_AL) {
833
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
834
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
835
        } else {
836
            tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
837
            tcg_out_dat_reg(s, cond, ARITH_ADD,
838
                            TCG_REG_PC, TCG_REG_PC,
839
                            TCG_REG_R8, SHIFT_IMM_LSL(0));
840
        }
841
#endif
842
    }
843
}
844

    
845
static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
846
{
847
    int32_t val;
848

    
849
    val = addr - (tcg_target_long) s->code_ptr;
850
    if (val < 0x01fffffd && val > -0x01fffffd)
851
        tcg_out_bl(s, cond, val);
852
    else {
853
#if 1
854
        tcg_abort();
855
#else
856
        if (cond == COND_AL) {
857
            tcg_out_dat_imm(s, cond, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
858
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
859
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
860
        } else {
861
            tcg_out_movi32(s, cond, TCG_REG_R9, addr);
862
            tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
863
                            TCG_REG_PC, SHIFT_IMM_LSL(0));
864
            tcg_out_bx(s, cond, TCG_REG_R9);
865
        }
866
#endif
867
    }
868
}
869

    
870
static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
871
{
872
    if (use_armv5_instructions) {
873
        tcg_out_blx(s, cond, arg);
874
    } else {
875
        tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
876
                        TCG_REG_PC, SHIFT_IMM_LSL(0));
877
        tcg_out_bx(s, cond, arg);
878
    }
879
}
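
/* Indirect calls use BLX (available from ARMv5T) when possible.  Otherwise
 * the return address is set up by copying PC into LR first: PC reads as the
 * current instruction plus 8, so LR ends up pointing just past the following
 * BX.
 */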
880

    
881
static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
882
{
883
    TCGLabel *l = &s->labels[label_index];
884

    
885
    if (l->has_value)
886
        tcg_out_goto(s, cond, l->u.value);
887
    else if (cond == COND_AL) {
888
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
889
        tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
890
        s->code_ptr += 4;
891
    } else {
892
        /* Probably this should be preferred even for COND_AL... */
893
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
894
        tcg_out_b_noaddr(s, cond);
895
    }
896
}
897

    
898
#ifdef CONFIG_SOFTMMU
899

    
900
#include "../../softmmu_defs.h"
901

    
902
static void *qemu_ld_helpers[4] = {
903
    __ldb_mmu,
904
    __ldw_mmu,
905
    __ldl_mmu,
906
    __ldq_mmu,
907
};
908

    
909
static void *qemu_st_helpers[4] = {
910
    __stb_mmu,
911
    __stw_mmu,
912
    __stl_mmu,
913
    __stq_mmu,
914
};
915
#endif
916

    
917
#define TLB_SHIFT        (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
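
/* The softmmu fast path below indexes env->tlb_table[mem_index] by hand.
 * The odd-looking immediate
 *     (mem_index << (TLB_SHIFT & 1)) | ((16 - (TLB_SHIFT >> 1)) << 8)
 * is simply "mem_index << TLB_SHIFT" pre-encoded as an ARM rotated immediate:
 * the low byte holds mem_index (shifted left by one when TLB_SHIFT is odd)
 * and bits [11:8] hold the rotate field that supplies the remaining even
 * shift.
 */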
918

    
919
static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
920
{
921
    int addr_reg, data_reg, data_reg2, bswap;
922
#ifdef CONFIG_SOFTMMU
923
    int mem_index, s_bits;
924
# if TARGET_LONG_BITS == 64
925
    int addr_reg2;
926
# endif
927
    uint32_t *label_ptr;
928
#endif
929

    
930
#ifdef TARGET_WORDS_BIGENDIAN
931
    bswap = 1;
932
#else
933
    bswap = 0;
934
#endif
935
    data_reg = *args++;
936
    if (opc == 3)
937
        data_reg2 = *args++;
938
    else
939
        data_reg2 = 0; /* suppress warning */
940
    addr_reg = *args++;
941
#ifdef CONFIG_SOFTMMU
942
# if TARGET_LONG_BITS == 64
943
    addr_reg2 = *args++;
944
# endif
945
    mem_index = *args;
946
    s_bits = opc & 3;
947

    
948
    /* Should generate something like the following:
949
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
950
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
951
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
952
     */
953
#  if CPU_TLB_BITS > 8
954
#   error
955
#  endif
956
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_R8,
957
                    0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
958
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
959
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
960
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
961
                    TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
962
    /* In the
     *  ldr r1, [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
     * below, the offset is likely to exceed 12 bits when mem_index != 0 and
     * to fit otherwise, so emit an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * first.
     */
969
    if (mem_index)
970
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
971
                        (mem_index << (TLB_SHIFT & 1)) |
972
                        ((16 - (TLB_SHIFT >> 1)) << 8));
973
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
974
                    offsetof(CPUState, tlb_table[0][0].addr_read));
975
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
976
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
977
    /* Check alignment.  */
978
    if (s_bits)
979
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
980
                        0, addr_reg, (1 << s_bits) - 1);
981
#  if TARGET_LONG_BITS == 64
982
    /* XXX: possibly we could use a block data load or writeback in
983
     * the first access.  */
984
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
985
                    offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
986
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
987
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
988
#  endif
989
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
990
                    offsetof(CPUState, tlb_table[0][0].addend));
991

    
992
    switch (opc) {
993
    case 0:
994
        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
995
        break;
996
    case 0 | 4:
997
        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
998
        break;
999
    case 1:
1000
        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1001
        if (bswap) {
1002
            tcg_out_bswap16(s, COND_EQ, data_reg, data_reg);
1003
        }
1004
        break;
1005
    case 1 | 4:
1006
        if (bswap) {
1007
            tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1008
            tcg_out_bswap16s(s, COND_EQ, data_reg, data_reg);
1009
        } else {
1010
            tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1011
        }
1012
        break;
1013
    case 2:
1014
    default:
1015
        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1016
        if (bswap) {
1017
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
1018
        }
1019
        break;
1020
    case 3:
1021
        if (bswap) {
1022
            tcg_out_ld32_rwb(s, COND_EQ, data_reg2, TCG_REG_R1, addr_reg);
1023
            tcg_out_ld32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
1024
            tcg_out_bswap32(s, COND_EQ, data_reg2, data_reg2);
1025
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
1026
        } else {
1027
            tcg_out_ld32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
1028
            tcg_out_ld32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
1029
        }
1030
        break;
1031
    }
1032

    
1033
    label_ptr = (void *) s->code_ptr;
1034
    tcg_out_b(s, COND_EQ, 8);
1035

    
1036
    /* TODO: move this code to where the constant pool will be */
1037
    if (addr_reg != TCG_REG_R0) {
1038
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1039
                        TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
1040
    }
1041
# if TARGET_LONG_BITS == 32
1042
    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R1, 0, mem_index);
1043
# else
1044
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1045
                    TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
1046
    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
1047
# endif
1048
    tcg_out_bl(s, COND_AL, (tcg_target_long) qemu_ld_helpers[s_bits] -
1049
                    (tcg_target_long) s->code_ptr);
1050

    
1051
    switch (opc) {
1052
    case 0 | 4:
1053
        tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
1054
        break;
1055
    case 1 | 4:
1056
        tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
1057
        break;
1058
    case 0:
1059
    case 1:
1060
    case 2:
1061
    default:
1062
        if (data_reg != TCG_REG_R0) {
1063
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1064
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
1065
        }
1066
        break;
1067
    case 3:
1068
        if (data_reg != TCG_REG_R0) {
1069
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1070
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
1071
        }
1072
        if (data_reg2 != TCG_REG_R1) {
1073
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1074
                            data_reg2, 0, TCG_REG_R1, SHIFT_IMM_LSL(0));
1075
        }
1076
        break;
1077
    }
1078

    
1079
    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
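    /* The adjustment above patches the "tcg_out_b(s, COND_EQ, 8)" emitted
     * before the slow-path call, so that a TLB hit branches over it; the
     * offset is counted in words, relative to the branch address plus 8.
     */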
1080
#else /* !CONFIG_SOFTMMU */
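    /* User-mode path: GUEST_BASE is folded into the guest address one rotated
     * 8-bit immediate at a time, accumulating into the R8 scratch register,
     * so no constant pool entry is needed.
     */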
1081
    if (GUEST_BASE) {
1082
        uint32_t offset = GUEST_BASE;
1083
        int i;
1084
        int rot;
1085

    
1086
        while (offset) {
1087
            i = ctz32(offset) & ~1;
1088
            rot = ((32 - i) << 7) & 0xf00;
1089

    
1090
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
1091
                            ((offset >> i) & 0xff) | rot);
1092
            addr_reg = TCG_REG_R8;
1093
            offset &= ~(0xff << i);
1094
        }
1095
    }
1096
    switch (opc) {
1097
    case 0:
1098
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
1099
        break;
1100
    case 0 | 4:
1101
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
1102
        break;
1103
    case 1:
1104
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
1105
        if (bswap) {
1106
            tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
1107
        }
1108
        break;
1109
    case 1 | 4:
1110
        if (bswap) {
1111
            tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
1112
            tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
1113
        } else {
1114
            tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
1115
        }
1116
        break;
1117
    case 2:
1118
    default:
1119
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
1120
        if (bswap) {
1121
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
1122
        }
1123
        break;
1124
    case 3:
1125
        /* TODO: use block load -
1126
         * check that data_reg2 > data_reg or the other way */
1127
        if (data_reg == addr_reg) {
1128
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
1129
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
1130
        } else {
1131
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
1132
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
1133
        }
1134
        if (bswap) {
1135
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
1136
            tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
1137
        }
1138
        break;
1139
    }
1140
#endif
1141
}
1142

    
1143
static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
1144
{
1145
    int addr_reg, data_reg, data_reg2, bswap;
1146
#ifdef CONFIG_SOFTMMU
1147
    int mem_index, s_bits;
1148
# if TARGET_LONG_BITS == 64
1149
    int addr_reg2;
1150
# endif
1151
    uint32_t *label_ptr;
1152
#endif
1153

    
1154
#ifdef TARGET_WORDS_BIGENDIAN
1155
    bswap = 1;
1156
#else
1157
    bswap = 0;
1158
#endif
1159
    data_reg = *args++;
1160
    if (opc == 3)
1161
        data_reg2 = *args++;
1162
    else
1163
        data_reg2 = 0; /* suppress warning */
1164
    addr_reg = *args++;
1165
#ifdef CONFIG_SOFTMMU
1166
# if TARGET_LONG_BITS == 64
1167
    addr_reg2 = *args++;
1168
# endif
1169
    mem_index = *args;
1170
    s_bits = opc & 3;
1171

    
1172
    /* Should generate something like the following:
1173
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
1174
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
1175
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
1176
     */
1177
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1178
                    TCG_REG_R8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
1179
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
1180
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
1181
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
1182
                    TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
1183
    /* In the
     *  ldr r1, [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
     * below, the offset is likely to exceed 12 bits when mem_index != 0 and
     * to fit otherwise, so emit an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * first.
     */
1190
    if (mem_index)
1191
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
1192
                        (mem_index << (TLB_SHIFT & 1)) |
1193
                        ((16 - (TLB_SHIFT >> 1)) << 8));
1194
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
1195
                    offsetof(CPUState, tlb_table[0][0].addr_write));
1196
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
1197
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
1198
    /* Check alignment.  */
1199
    if (s_bits)
1200
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
1201
                        0, addr_reg, (1 << s_bits) - 1);
1202
#  if TARGET_LONG_BITS == 64
1203
    /* XXX: possibly we could use a block data load or writeback in
1204
     * the first access.  */
1205
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
1206
                    offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
1207
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
1208
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
1209
#  endif
1210
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
1211
                    offsetof(CPUState, tlb_table[0][0].addend));
1212

    
1213
    switch (opc) {
1214
    case 0:
1215
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1216
        break;
1217
    case 1:
1218
        if (bswap) {
1219
            tcg_out_bswap16(s, COND_EQ, TCG_REG_R0, data_reg);
1220
            tcg_out_st16_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
1221
        } else {
1222
            tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1223
        }
1224
        break;
1225
    case 2:
1226
    default:
1227
        if (bswap) {
1228
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
1229
            tcg_out_st32_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
1230
        } else {
1231
            tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1232
        }
1233
        break;
1234
    case 3:
1235
        if (bswap) {
1236
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg2);
1237
            tcg_out_st32_rwb(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, addr_reg);
1238
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
1239
            tcg_out_st32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
1240
        } else {
1241
            tcg_out_st32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
1242
            tcg_out_st32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
1243
        }
1244
        break;
1245
    }
1246

    
1247
    label_ptr = (void *) s->code_ptr;
1248
    tcg_out_b(s, COND_EQ, 8);
1249

    
1250
    /* TODO: move this code to where the constant pool will be */
1251
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1252
                    TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
1253
# if TARGET_LONG_BITS == 32
1254
    switch (opc) {
1255
    case 0:
1256
        tcg_out_ext8u(s, COND_AL, TCG_REG_R1, data_reg);
1257
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
1258
        break;
1259
    case 1:
1260
        tcg_out_ext16u(s, COND_AL, TCG_REG_R1, data_reg);
1261
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
1262
        break;
1263
    case 2:
1264
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1265
                        TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
1266
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
1267
        break;
1268
    case 3:
1269
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
1270
        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
1271
        if (data_reg != TCG_REG_R2) {
1272
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1273
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
1274
        }
1275
        if (data_reg2 != TCG_REG_R3) {
1276
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1277
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
1278
        }
1279
        break;
1280
    }
1281
# else
1282
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1283
                    TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
1284
    switch (opc) {
1285
    case 0:
1286
        tcg_out_ext8u(s, COND_AL, TCG_REG_R2, data_reg);
1287
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
1288
        break;
1289
    case 1:
1290
        tcg_out_ext16u(s, COND_AL, TCG_REG_R2, data_reg);
1291
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
1292
        break;
1293
    case 2:
1294
        if (data_reg != TCG_REG_R2) {
1295
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1296
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
1297
        }
1298
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
1299
        break;
1300
    case 3:
1301
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
1302
        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
1303
        if (data_reg != TCG_REG_R2) {
1304
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1305
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
1306
        }
1307
        if (data_reg2 != TCG_REG_R3) {
1308
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1309
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
1310
        }
1311
        break;
1312
    }
1313
# endif
1314

    
1315
    tcg_out_bl(s, COND_AL, (tcg_target_long) qemu_st_helpers[s_bits] -
1316
                    (tcg_target_long) s->code_ptr);
1317
    if (opc == 3)
1318
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);
1319

    
1320
    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
1321
#else /* !CONFIG_SOFTMMU */
1322
    if (GUEST_BASE) {
1323
        uint32_t offset = GUEST_BASE;
1324
        int i;
1325
        int rot;
1326

    
1327
        while (offset) {
1328
            i = ctz32(offset) & ~1;
1329
            rot = ((32 - i) << 7) & 0xf00;
1330

    
1331
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg,
1332
                            ((offset >> i) & 0xff) | rot);
1333
            addr_reg = TCG_REG_R1;
1334
            offset &= ~(0xff << i);
1335
        }
1336
    }
1337
    switch (opc) {
1338
    case 0:
1339
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
1340
        break;
1341
    case 1:
1342
        if (bswap) {
1343
            tcg_out_bswap16(s, COND_AL, TCG_REG_R0, data_reg);
1344
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0);
1345
        } else {
1346
            tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
1347
        }
1348
        break;
1349
    case 2:
1350
    default:
1351
        if (bswap) {
1352
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
1353
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
1354
        } else {
1355
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
1356
        }
1357
        break;
1358
    case 3:
1359
        /* TODO: use block store -
1360
         * check that data_reg2 > data_reg or the other way */
1361
        if (bswap) {
1362
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
1363
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
1364
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
1365
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4);
1366
        } else {
1367
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
1368
            tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
1369
        }
1370
        break;
1371
    }
1372
#endif
1373
}
1374

    
1375
static uint8_t *tb_ret_addr;
1376

    
1377
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1378
                const TCGArg *args, const int *const_args)
1379
{
1380
    int c;
1381

    
1382
    switch (opc) {
1383
    case INDEX_op_exit_tb:
1384
        {
1385
            uint8_t *ld_ptr = s->code_ptr;
1386
            if (args[0] >> 8)
1387
                tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
1388
            else
1389
                tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
1390
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
1391
            if (args[0] >> 8) {
1392
                *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
1393
                tcg_out32(s, args[0]);
1394
            }
1395
        }
1396
        break;
1397
    case INDEX_op_goto_tb:
1398
        if (s->tb_jmp_offset) {
1399
            /* Direct jump method */
1400
#if defined(USE_DIRECT_JUMP)
1401
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1402
            tcg_out_b(s, COND_AL, 8);
1403
#else
1404
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
1405
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1406
            tcg_out32(s, 0);
1407
#endif
1408
        } else {
1409
            /* Indirect jump method */
1410
#if 1
1411
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
1412
            if (c > 0xfff || c < -0xfff) {
1413
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
1414
                                (tcg_target_long) (s->tb_next + args[0]));
1415
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
1416
            } else
1417
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
1418
#else
1419
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
1420
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
1421
            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
1422
#endif
1423
        }
1424
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1425
        break;
1426
    case INDEX_op_call:
1427
        if (const_args[0])
1428
            tcg_out_call(s, COND_AL, args[0]);
1429
        else
1430
            tcg_out_callr(s, COND_AL, args[0]);
1431
        break;
1432
    case INDEX_op_jmp:
1433
        if (const_args[0])
1434
            tcg_out_goto(s, COND_AL, args[0]);
1435
        else
1436
            tcg_out_bx(s, COND_AL, args[0]);
1437
        break;
1438
    case INDEX_op_br:
1439
        tcg_out_goto_label(s, COND_AL, args[0]);
1440
        break;
1441

    
1442
    case INDEX_op_ld8u_i32:
1443
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
1444
        break;
1445
    case INDEX_op_ld8s_i32:
1446
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
1447
        break;
1448
    case INDEX_op_ld16u_i32:
1449
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
1450
        break;
1451
    case INDEX_op_ld16s_i32:
1452
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
1453
        break;
1454
    case INDEX_op_ld_i32:
1455
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
1456
        break;
1457
    case INDEX_op_st8_i32:
1458
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
1459
        break;
1460
    case INDEX_op_st16_i32:
1461
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
1462
        break;
1463
    case INDEX_op_st_i32:
1464
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
1465
        break;
1466

    
1467
    case INDEX_op_mov_i32:
1468
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1469
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
1470
        break;
1471
    case INDEX_op_movi_i32:
1472
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
1473
        break;
1474
    case INDEX_op_add_i32:
1475
        c = ARITH_ADD;
1476
        goto gen_arith;
1477
    case INDEX_op_sub_i32:
1478
        c = ARITH_SUB;
1479
        goto gen_arith;
1480
    case INDEX_op_and_i32:
1481
        c = ARITH_AND;
1482
        goto gen_arith;
1483
    case INDEX_op_andc_i32:
1484
        c = ARITH_BIC;
1485
        goto gen_arith;
1486
    case INDEX_op_or_i32:
1487
        c = ARITH_ORR;
1488
        goto gen_arith;
1489
    case INDEX_op_xor_i32:
1490
        c = ARITH_EOR;
1491
        /* Fall through.  */
1492
    gen_arith:
1493
        if (const_args[2]) {
1494
            int rot;
1495
            rot = encode_imm(args[2]);
1496
            tcg_out_dat_imm(s, COND_AL, c,
1497
                            args[0], args[1], rotl(args[2], rot) | (rot << 7));
1498
        } else
1499
            tcg_out_dat_reg(s, COND_AL, c,
1500
                            args[0], args[1], args[2], SHIFT_IMM_LSL(0));
1501
        break;
1502
    case INDEX_op_add2_i32:
1503
        tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
1504
                        args[0], args[1], args[2], args[3],
1505
                        args[4], args[5], SHIFT_IMM_LSL(0));
1506
        break;
1507
    case INDEX_op_sub2_i32:
1508
        tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
1509
                        args[0], args[1], args[2], args[3],
1510
                        args[4], args[5], SHIFT_IMM_LSL(0));
1511
        break;
1512
    case INDEX_op_neg_i32:
1513
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
1514
        break;
1515
    case INDEX_op_not_i32:
1516
        tcg_out_dat_reg(s, COND_AL,
1517
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
1518
        break;
1519
    case INDEX_op_mul_i32:
1520
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
1521
        break;
1522
    case INDEX_op_mulu2_i32:
1523
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1524
        break;
1525
    /* XXX: Perhaps args[2] & 0x1f is wrong */
1526
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through.  */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            /* rotl(x, n) == rotr(x, 32 - n): compute 32 minus the count
               (args[2]) into the reserved scratch register r8 and rotate
               right by it.  */
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_R8, args[2], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_R8));
        }
        break;

    case INDEX_op_brcond_i32:
        if (const_args[1]) {
            int rot;
            rot = encode_imm(args[1]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[0], rotl(args[1], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[0], args[1], SHIFT_IMM_LSL(0));
        }
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) ||  a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) ||  a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) ||  a1 >  a3,
         */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[0], args[2], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
        break;
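    /* setcond: compare the operands, then write 1 into the result under
       the requested condition and 0 under the inverted condition, using
       two conditionally executed MOVs.  */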
    case INDEX_op_setcond_i32:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[1], rotl(args[2], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[1], args[2], SHIFT_IMM_LSL(0));
        }
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[2], args[4], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
        break;

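    /* The second argument of tcg_out_qemu_ld/st encodes the access:
       bits 1:0 are log2 of the access size in bytes, bit 2 requests sign
       extension of the loaded value.  */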
    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    default:
        tcg_abort();
    }
}

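/* Operand constraints: "r" is any core register and "i" any constant;
   "I" accepts only constants encodable as an ARM data-processing
   immediate.  "l"/"L" and "s"/"S" are the qemu_ld/qemu_st register
   classes defined by the constraint parser earlier in this file.  */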
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rI" } },
    { INDEX_op_sub_i32, { "r", "r", "rI" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rI" } },
    { INDEX_op_andc_i32, { "r", "r", "rI" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rI" } },
    { INDEX_op_setcond_i32, { "r", "r", "rI" } },

    /* TODO: "r", "r", "r", "r", "ri", "ri" */
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "r", "r" } },

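    /* With TARGET_LONG_BITS == 64 the guest address occupies two 32-bit
       registers, hence the extra address constraint per operation in the
       second half of this list.  */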
#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l" } },

    { INDEX_op_qemu_st8, { "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l", "l" } },

    { INDEX_op_qemu_st8, { "s", "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s", "s" } },
#endif

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { -1 },
};

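/* All sixteen core registers are visible to the register allocator.
   r0-r3, r12 and lr are call-clobbered under the AAPCS; sp, pc and r8
   (used as a scratch register by the generated code, e.g. for the
   rotl_i32 expansion above) are reserved.  */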
void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
                     (1 << TCG_REG_R1) |
                     (1 << TCG_REG_R2) |
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R12) |
                     (1 << TCG_REG_R14));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);

    tcg_add_target_add_op_defs(arm_op_defs);
}

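/* Generic load/store and move helpers used by the common TCG code, for
   example when spilling and reloading temporaries.  Only TCG_TYPE_I32
   exists on this 32-bit target, so the type argument is ignored.  */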
static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}

static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    /* Only adjustments smaller than 0x100 in magnitude are supported;
       anything larger aborts.  */
    if (val > 0) {
        if (val < 0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
        else
            tcg_abort();
    } else if (val < 0) {
        if (val > -0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
        else
            tcg_abort();
    }
}

static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                int ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}

void tcg_target_qemu_prologue(TCGContext *s)
{
    /* There is no need to save r7, it is used to store the address
       of the env structure and is not modified by GCC. */

    /* stmdb sp!, { r4 - r6, r8 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4f70);

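    /* The prologue is entered with the address of the translated block
       in r0; jump to it.  Everything from tb_ret_addr on is the
       epilogue: translated code branches back here, which restores the
       callee-saved registers and returns to the caller.  */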
    tcg_out_bx(s, COND_AL, TCG_REG_R0);
    tb_ret_addr = s->code_ptr;

    /* ldmia sp!, { r4 - r6, r8 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8f70);
}