/* tcg/arm/tcg-target.c @ revision 23401b58 */

1
/*
2
 * Tiny Code Generator for QEMU
3
 *
4
 * Copyright (c) 2008 Andrzej Zaborowski
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7
 * of this software and associated documentation files (the "Software"), to deal
8
 * in the Software without restriction, including without limitation the rights
9
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
 * copies of the Software, and to permit persons to whom the Software is
11
 * furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22
 * THE SOFTWARE.
23
 */
24

    
25
/* Detect the host ARM architecture revision from the compiler's
   predefined macros and expose the result as compile-time-constant
   ints so the code generator can use plain C 'if's (dead branches
   are removed by the compiler).  */
#if defined(__ARM_ARCH_7__) ||  \
    defined(__ARM_ARCH_7A__) || \
    defined(__ARM_ARCH_7EM__) || \
    defined(__ARM_ARCH_7M__) || \
    defined(__ARM_ARCH_7R__)
#define USE_ARMV7_INSTRUCTIONS
#endif

/* ARMv7 implies the ARMv6 instruction set; also match the explicit
   v6 variant macros.  */
#if defined(USE_ARMV7_INSTRUCTIONS) || \
    defined(__ARM_ARCH_6J__) || \
    defined(__ARM_ARCH_6K__) || \
    defined(__ARM_ARCH_6T2__) || \
    defined(__ARM_ARCH_6Z__) || \
    defined(__ARM_ARCH_6ZK__)
#define USE_ARMV6_INSTRUCTIONS
#endif

/* ARMv6 implies the ARMv5 instruction set; also match the explicit
   v5 variant macros.  */
#if defined(USE_ARMV6_INSTRUCTIONS) || \
    defined(__ARM_ARCH_5T__) || \
    defined(__ARM_ARCH_5TE__) || \
    defined(__ARM_ARCH_5TEJ__)
#define USE_ARMV5_INSTRUCTIONS
#endif

/* Convert each feature macro into a const int, then #undef the macro
   so the rest of the file only tests the constants.  */
#ifdef USE_ARMV5_INSTRUCTIONS
static const int use_armv5_instructions = 1;
#else
static const int use_armv5_instructions = 0;
#endif
#undef USE_ARMV5_INSTRUCTIONS

#ifdef USE_ARMV6_INSTRUCTIONS
static const int use_armv6_instructions = 1;
#else
static const int use_armv6_instructions = 0;
#endif
#undef USE_ARMV6_INSTRUCTIONS

#ifdef USE_ARMV7_INSTRUCTIONS
static const int use_armv7_instructions = 1;
#else
static const int use_armv7_instructions = 0;
#endif
#undef USE_ARMV7_INSTRUCTIONS
69

    
70
/* Register names for debug dumps only (TCG_TARGET_NB_REGS == 16,
   r0-r14 plus pc).  */
#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",
    "%r1",
    "%r2",
    "%r3",
    "%r4",
    "%r5",
    "%r6",
    "%r7",
    "%r8",
    "%r9",
    "%r10",
    "%r11",
    "%r12",
    "%r13",
    "%r14",
    "%pc",
};
#endif

/* Register allocation preference order; pc is deliberately excluded.
   NOTE(review): r8 is listed here although the emitters below use it
   as a scratch register -- verify this is intentional.  */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
};

/* AAPCS: arguments in r0-r3, 32-bit result in r0, 64-bit in r0:r1.  */
static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};
115

    
116
/* Patch a relocation in already-emitted code.
 * code_ptr: location of the instruction/word to patch.
 * type:     ELF ARM relocation type.
 * value:    target address.
 * addend:   unused by the supported relocation types.
 */
static void patch_reloc(uint8_t *code_ptr, int type,
                tcg_target_long value, tcg_target_long addend)
{
    switch (type) {
    case R_ARM_ABS32:
        /* Absolute 32-bit address stored as a literal word.  */
        *(uint32_t *) code_ptr = value;
        break;

    case R_ARM_CALL:
    case R_ARM_JUMP24:
    default:
        /* Unsupported relocation type.  tcg_abort() does not return,
           so there is no fall-through into the next case.  */
        tcg_abort();

    case R_ARM_PC24:
        /* 24-bit word-aligned PC-relative branch offset: keep the top
           byte (condition + opcode), replace the low 24 bits.  The +8
           accounts for the ARM pipeline (PC reads as insn + 8).  */
        *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & 0xff000000) |
                (((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff);
        break;
    }
}
135

    
136
/* maximum number of register used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    /* AAPCS always provides four argument registers (r0-r3); the
       flags are not consulted.  */
    return 4;
}
141

    
142
/* parse target specific constraints */
/* Consume one constraint letter from *pct_str, fill in *ct, and
 * advance the string.  Returns 0 on success, -1 on an unknown letter.
 *
 *  'I' - constant encodable as an ARM operand-2 immediate
 *  'r' - any register
 *  'x'/'d'/'D'/'X' - qemu_ld/st operands; under CONFIG_SOFTMMU they
 *        exclude the registers clobbered by the slow-path call,
 *        otherwise they degrade to plain 'r'.
 */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'I':
         ct->ct |= TCG_CT_CONST_ARM;
         break;

    case 'r':
#ifndef CONFIG_SOFTMMU
    /* Without softmmu there is no helper call, so no registers need
       to be reserved.  */
    case 'd':
    case 'D':
    case 'x':
    case 'X':
#endif
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        break;

#ifdef CONFIG_SOFTMMU
    /* qemu_ld/st inputs (unless 'X', 'd' or 'D') */
    case 'x':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 are used to build the TLB lookup / call args.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;

    /* qemu_ld64 data_reg */
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r1 is still needed to load data_reg2, so don't use it.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;

    /* qemu_ld/st64 data_reg2 */
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0, r1 and optionally r2 will be overwritten by the address
         * and the low word of data, so don't use these.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
# if TARGET_LONG_BITS == 64
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
# endif
        break;

# if TARGET_LONG_BITS == 64
    /* qemu_ld/st addr_reg2 */
    case 'X':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 will be overwritten by the low word of base, so don't use it.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;
# endif
#endif

    default:
        return -1;
    }
    /* Letter accepted: advance the caller's cursor.  */
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
214

    
215
/* Rotate a 32-bit value left by n bits (0 <= n <= 31).
 *
 * The previous expression "(val << n) | (val >> (32 - n))" invoked
 * undefined behavior for n == 0 (right shift by the full type width).
 * Masking the right-shift count with 31 makes n == 0 a well-defined
 * identity while producing the same result for every other n.
 */
static inline uint32_t rotl(uint32_t val, int n)
{
  return (val << n) | (val >> ((32 - n) & 31));
}
219

    
220
/* ARM immediates for ALU instructions are made of an unsigned 8-bit
221
   right-rotated by an even amount between 0 and 30. */
222
static inline int encode_imm(uint32_t imm)
223
{
224
    int shift;
225

    
226
    /* simple case, only lower bits */
227
    if ((imm & ~0xff) == 0)
228
        return 0;
229
    /* then try a simple even shift */
230
    shift = ctz32(imm) & ~1;
231
    if (((imm >> shift) & ~0xff) == 0)
232
        return 32 - shift;
233
    /* now try harder with rotations */
234
    if ((rotl(imm, 2) & ~0xff) == 0)
235
        return 2;
236
    if ((rotl(imm, 4) & ~0xff) == 0)
237
        return 4;
238
    if ((rotl(imm, 6) & ~0xff) == 0)
239
        return 6;
240
    /* imm can't be encoded */
241
    return -1;
242
}
243

    
244
static inline int check_fit_imm(uint32_t imm)
245
{
246
    return encode_imm(imm) >= 0;
247
}
248

    
249
/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
/* Returns nonzero when 'val' satisfies 'arg_ct': any constant for
   TCG_CT_CONST, or an operand-2-encodable one for TCG_CT_CONST_ARM.  */
static inline int tcg_target_const_match(tcg_target_long val,
                const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
        return 1;
    else
        return 0;
}
269

    
270
/* ARM data-processing opcodes (instruction bits 24:21).  */
enum arm_data_opc_e {
    ARITH_AND = 0x0,
    ARITH_EOR = 0x1,
    ARITH_SUB = 0x2,
    ARITH_RSB = 0x3,
    ARITH_ADD = 0x4,
    ARITH_ADC = 0x5,
    ARITH_SBC = 0x6,
    ARITH_RSC = 0x7,
    ARITH_TST = 0x8,
    ARITH_CMP = 0xa,
    ARITH_CMN = 0xb,
    ARITH_ORR = 0xc,
    ARITH_MOV = 0xd,
    ARITH_BIC = 0xe,
    ARITH_MVN = 0xf,
};

/* Comparison opcodes only exist with the S bit (bit 20) set, since
   their sole effect is updating the condition flags.  */
#define TO_CPSR(opc) \
  ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)

/* Operand-2 shifter encodings: immediate shift amount (bits 11:7)
   or shift amount taken from a register (bits 11:8), with the shift
   type in bits 6:5.  */
#define SHIFT_IMM_LSL(im)        (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)        (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)        (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)        (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)        (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)        (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)        (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)        (((rs) << 8) | 0x70)
299

    
300
/* ARM condition codes (instruction bits 31:28).  */
enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,        /* Unsigned greater or equal */
    COND_CC = 0x3,        /* Unsigned less than */
    COND_MI = 0x4,        /* Negative */
    COND_PL = 0x5,        /* Zero or greater */
    COND_VS = 0x6,        /* Overflow */
    COND_VC = 0x7,        /* No overflow */
    COND_HI = 0x8,        /* Unsigned greater than */
    COND_LS = 0x9,        /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,        /* Always */
};

/* Map TCG comparison conditions to ARM condition codes (assumes the
   flags were set by a preceding CMP-style instruction).  */
static const uint8_t tcg_cond_to_arm_cond[10] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};
331

    
332
/* Emit BX rn: branch to the address in rn (may interwork to Thumb).  */
static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

/* Emit B <offset>: PC-relative branch.  'offset' is relative to this
   instruction; the -8 compensates for the pipeline (PC = insn + 8)
   and the offset is stored in words (>> 2) in the low 24 bits.  */
static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

/* Emit a B instruction whose 24-bit offset field is left for a later
   relocation: only the opcode byte (condition | 0xa) is written, at
   whichever end of the word it lives on this host's endianness.  */
static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
{
#ifdef HOST_WORDS_BIGENDIAN
    tcg_out8(s, (cond << 4) | 0x0a);
    s->code_ptr += 3;
#else
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0a);
#endif
}

/* Emit BL <offset>: branch-and-link, same offset encoding as B.  */
static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

/* Emit BLX rn: branch-and-link to register (ARMv5T and later).  */
static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}
364

    
365
/* Emit a data-processing instruction with a register operand-2:
   rd = rn <opc> (rm shifted by 'shift', one of the SHIFT_* macros).
   TO_CPSR sets the S bit for the pure comparison opcodes.  */
static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | shift | rm);
}

/* Emit a 64-bit operation as a pair of data-processing instructions
   (e.g. ADDS/ADC): the low-word op always sets the flags (1 << 20)
   so the high-word op can consume the carry.  If rd0 overlaps an
   input of the second instruction, the low result is computed into
   the scratch register r8 (the literal 8 << 12) and moved to rd0
   afterwards.  */
static inline void tcg_out_dat_reg2(TCGContext *s,
                int cond, int opc0, int opc1, int rd0, int rd1,
                int rn0, int rn1, int rm0, int rm1, int shift)
{
    if (rd0 == rn1 || rd0 == rm1) {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (8 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    } else {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (rd0 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
    }
}

/* Emit a data-processing instruction with an immediate operand-2
   (bit 25 set).  'im' must already be a valid 12-bit rotated-immediate
   encoding (imm8 + 4-bit rotation field).  */
static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | im);
}
397

    
398
/* Load the 32-bit constant 'arg' into rd, choosing the shortest
   available sequence: MVN for small negatives, PC-relative ADD/SUB
   when the value is near the code pointer, MOVW/MOVT on ARMv7, and
   otherwise a MOV plus up to three ORRs of rotated byte immediates.  */
static inline void tcg_out_movi32(TCGContext *s,
                int cond, int rd, int32_t arg)
{
    /* Distance from the current PC (this insn + 8, per the pipeline).  */
    int offset = (uint32_t) arg - ((uint32_t) s->code_ptr + 8);

    /* TODO: This is very suboptimal, we can easily have a constant
     * pool somewhere after all the instructions.  */

    /* Small negative: one MVN of the complemented byte.  */
    if (arg < 0 && arg > -0x100)
        return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);

    /* Close to PC: rd = pc +/- offset (register 15 is the PC).  */
    if (offset < 0x100 && offset > -0x100)
        return offset >= 0 ?
                tcg_out_dat_imm(s, cond, ARITH_ADD, rd, 15, offset) :
                tcg_out_dat_imm(s, cond, ARITH_SUB, rd, 15, -offset);

    if (use_armv7_instructions) {
        /* use movw/movt */
        /* movw: low 16 bits (imm4 at 19:16, imm12 at 11:0) */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000)
            /* movt: high 16 bits, low half preserved */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
    } else {
        /* MOV the low byte, then OR in each remaining nonzero byte.
           0xc00/0x800/0x400 are the rotation fields placing the byte
           at bits 8-15, 16-23 and 24-31 respectively.  */
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff);
        if (arg & 0x0000ff00)
            tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                            ((arg >>  8) & 0xff) | 0xc00);
        if (arg & 0x00ff0000)
            tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                            ((arg >> 16) & 0xff) | 0x800);
        if (arg & 0xff000000)
            tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                            ((arg >> 24) & 0xff) | 0x400);
    }
}
436

    
437
/* Emit MUL rd, rs, rm.  Pre-ARMv6, Rd must differ from Rm, so the
   operands are swapped when possible, and otherwise the product is
   computed into the scratch register r8 (the literal 8 << 16) and
   moved to rd.  */
static inline void tcg_out_mul32(TCGContext *s,
                int cond, int rd, int rs, int rm)
{
    if (rd != rm)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
    else if (rd != rs)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rm << 8) | 0x90 | rs);
    else {
        /* rd == rs == rm: multiply into r8, then move to rd.  */
        tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    }
}

/* Emit UMULL rd0, rd1, rs, rm (unsigned 32x32->64; rd0 = low word).
   When both destinations clash with an operand, that operand is
   first copied to r8 (0x...98 encodes Rm = r8).  */
static inline void tcg_out_umull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0x800098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}

/* Emit SMULL rd0, rd1, rs, rm (signed variant; same scheme).  */
static inline void tcg_out_smull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0xc00098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}
487

    
488
/* LDR rd, [rn, #+/-im] with a 12-bit immediate offset; the sign of
   'im' selects the U (add/subtract offset) bit.  */
static inline void tcg_out_ld32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05900000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05100000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

/* STR rd, [rn, #+/-im], 12-bit immediate offset.  */
static inline void tcg_out_st32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05800000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05000000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

/* LDR rd, [rn, rm] (register offset).  */
static inline void tcg_out_ld32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07900000 |
                    (rn << 16) | (rd << 12) | rm);
}

/* STR rd, [rn, rm] (register offset).  */
static inline void tcg_out_st32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07800000 |
                    (rn << 16) | (rd << 12) | rm);
}

/* Register pre-increment with base writeback.  */
/* LDR rd, [rn, rm]! */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07b00000 |
                    (rn << 16) | (rd << 12) | rm);
}

/* STR rd, [rn, rm]! */
static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07a00000 |
                    (rn << 16) | (rd << 12) | rm);
}
538

    
539
/* LDRH rd, [rn, #+/-im]: halfword load, 8-bit immediate offset split
   into two nibbles (high at bits 11:8, low at 3:0).  */
static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

/* STRH rd, [rn, #+/-im], same split-immediate encoding.  */
static inline void tcg_out_st16_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

/* LDRH rd, [rn, rm] (register offset).  */
static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

/* STRH rd, [rn, rm] (register offset).  */
static inline void tcg_out_st16_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

/* LDRSH rd, [rn, #+/-im]: sign-extending halfword load.  */
static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000f0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000f0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

/* LDRSH rd, [rn, rm] (register offset).  */
static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000f0 |
                    (rn << 16) | (rd << 12) | rm);
}
598

    
599
/* LDRB rd, [rn, #+/-im]: byte load, 12-bit immediate offset.  */
static inline void tcg_out_ld8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05d00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05500000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

/* STRB rd, [rn, #+/-im], 12-bit immediate offset.  */
static inline void tcg_out_st8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05c00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05400000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

/* LDRB rd, [rn, rm] (register offset).  */
static inline void tcg_out_ld8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07d00000 |
                    (rn << 16) | (rd << 12) | rm);
}

/* STRB rd, [rn, rm] (register offset).  */
static inline void tcg_out_st8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07c00000 |
                    (rn << 16) | (rd << 12) | rm);
}

/* LDRSB rd, [rn, #+/-im]: sign-extending byte load, 8-bit immediate
   offset split into two nibbles.  */
static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000d0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000d0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

/* LDRSB rd, [rn, rm] (register offset).  */
static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000d0 |
                    (rn << 16) | (rd << 12) | rm);
}
654

    
655
/* The helpers below load/store with an arbitrary 32-bit offset: when
   the offset fits the instruction's immediate range it is encoded
   directly, otherwise it is materialized in the scratch register r8
   and a register-offset form is used.  Word/byte forms take +/-0xfff;
   halfword and signed-byte forms only +/-0xff.  */

/* Load word: rd = *(uint32_t *)(rn + offset).  */
static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

/* Store word.  */
static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

/* Load zero-extended halfword.  */
static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

/* Load sign-extended halfword.  */
static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

/* Store halfword.  */
static inline void tcg_out_st16(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st16_8(s, cond, rd, rn, offset);
}

/* Load zero-extended byte.  */
static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

/* Load sign-extended byte (LDRSB: only +/-0xff immediate range).  */
static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

/* Store byte.  */
static inline void tcg_out_st8(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}
734

    
735
/* Emit an unconditional-per-cond jump to the absolute address 'addr'.
   Uses a single B when the displacement fits the +/-32MB branch
   range; the out-of-range fallback is currently disabled (#if 1)
   and aborts instead.  */
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    /* Range check accounts for the -8 applied inside tcg_out_b.  */
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
        tcg_out_b(s, cond, val);
    else {
#if 1
        tcg_abort();
#else
        if (cond == COND_AL) {
            /* ldr pc, [pc, #-4] followed by the literal address:
               PC reads as insn + 8, so [pc, #-4] is the next word.  */
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
            tcg_out_dat_reg(s, cond, ARITH_ADD,
                            TCG_REG_PC, TCG_REG_PC,
                            TCG_REG_R8, SHIFT_IMM_LSL(0));
        }
#endif
    }
}

/* Emit a call to the absolute address 'addr' via BL when in range;
   the fallback is likewise disabled.
   NOTE(review): unlike tcg_out_goto, the range check here does not
   subtract 8 from 'val' -- confirm whether this asymmetry is
   intentional or a latent off-by-8 at the extreme range.  */
static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    if (val < 0x01fffffd && val > -0x01fffffd)
        tcg_out_bl(s, cond, val);
    else {
#if 1
        tcg_abort();
#else
        if (cond == COND_AL) {
            /* Manually build the link register, then jump via a
               literal load.  */
            tcg_out_dat_imm(s, cond, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R9, addr);
            tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
                            TCG_REG_PC, SHIFT_IMM_LSL(0));
            tcg_out_bx(s, cond, TCG_REG_R9);
        }
#endif
    }
}
783

    
784
/* Call the address held in register 'arg': BLX on ARMv5+, otherwise
   build the return address in lr manually (mov lr, pc reads pc as
   this insn + 8, i.e. the instruction after the bx) and BX.  */
static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
{
    if (use_armv5_instructions) {
        tcg_out_blx(s, cond, arg);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
                        TCG_REG_PC, SHIFT_IMM_LSL(0));
        tcg_out_bx(s, cond, arg);
    }
}

/* Branch to a TCG label.  If the label is already resolved, emit a
   direct jump; otherwise emit a relocated placeholder (31337 is a
   dummy addend, overwritten when the label is fixed up).  */
static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value)
        tcg_out_goto(s, cond, l->u.value);
    else if (cond == COND_AL) {
        /* ldr pc, [pc, #-4] plus a literal word (reserved by the
           code_ptr bump) that the R_ARM_ABS32 reloc will fill in.  */
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
        s->code_ptr += 4;
    } else {
        /* Probably this should be preferred even for COND_AL... */
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
        tcg_out_b_noaddr(s, cond);
    }
}
811

    
812
#ifdef CONFIG_SOFTMMU

#include "../../softmmu_defs.h"

/* Slow-path memory access helpers, indexed by log2 of the access
   size (0 = byte ... 3 = quadword).  */
static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

/* Shift converting a page-aligned address into a byte offset within
   the per-CPU TLB table.  */
#define TLB_SHIFT        (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
832

    
833
/* Emit code for a qemu_ld op: load a guest-memory value into data_reg
 * (and data_reg2 for 64-bit loads).  The low two bits of 'opc' give the
 * access size (0=8, 1=16, 2=32, 3=64 bits) and bit 2 requests sign
 * extension.  With CONFIG_SOFTMMU an inline TLB lookup is emitted with a
 * helper call on the slow path; otherwise the guest address is used
 * directly, offset by GUEST_BASE when set.
 */
static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
                const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

    /* Unpack the TCG operand list: destination reg(s), address reg(s),
     * then (softmmu only) the MMU index.  */
    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
#  if CPU_TLB_BITS > 8
#   error
#  endif
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_R8,
                    0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
                    TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    /* Load the TLB tag and compare it against the page-aligned address;
     * from here on, EQ means "TLB hit".  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_read));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment.  */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
#  if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access.  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
#  endif
    /* r1 := host-address addend for this TLB entry.  The fast-path
     * accesses below are all conditional on EQ (hit).  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 0 | 4:
        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1 | 4:
        tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 2:
    default:
        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 3:
        tcg_out_ld32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
        tcg_out_ld32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
        break;
    }

    /* On a hit, skip the slow-path call that follows.  The branch offset
     * is patched at the end of this function once the slow path's length
     * is known.  */
    label_ptr = (void *) s->code_ptr;
    tcg_out_b(s, COND_EQ, 8);

    /* Slow path: marshal the guest address (and mem_index) into the
     * calling-convention registers and call the load helper.  */
    /* TODO: move this code to where the constants pool will be */
    if (addr_reg != TCG_REG_R0) {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
    }
# if TARGET_LONG_BITS == 32
    tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R1, 0, mem_index);
# else
    if (addr_reg2 != TCG_REG_R1) {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    }
    tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R2, 0, mem_index);
# endif
    tcg_out_bl(s, cond, (tcg_target_long) qemu_ld_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);

    /* Move (and sign-extend where requested) the helper's result from
     * r0 (and r1 for 64-bit) into the destination register(s).  */
    switch (opc) {
    case 0 | 4:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R0, 0, TCG_REG_R0, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        data_reg, 0, TCG_REG_R0, SHIFT_IMM_ASR(24));
        break;
    case 1 | 4:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R0, 0, TCG_REG_R0, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        data_reg, 0, TCG_REG_R0, SHIFT_IMM_ASR(16));
        break;
    case 0:
    case 1:
    case 2:
    default:
        if (data_reg != TCG_REG_R0) {
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
        }
        break;
    case 3:
        if (data_reg != TCG_REG_R0) {
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R1) {
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg2, 0, TCG_REG_R1, SHIFT_IMM_LSL(0));
        }
        break;
    }

    /* Patch the COND_EQ branch emitted above so it jumps past the slow
     * path (the -8 accounts for the ARM PC read-ahead).  */
    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        /* Fold the GUEST_BASE constant into the address register, one
         * rotated 8-bit immediate chunk per ADD.  */
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1 | 4:
        tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 2:
    default:
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 3:
        /* TODO: use block load -
         * check that data_reg2 > data_reg or the other way */
        /* Order the two word loads so the address register is not
         * clobbered before its second use.  */
        if (data_reg == addr_reg) {
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        } else {
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
        }
        break;
    }
#endif
}
1027

    
1028
/* Emit code for a qemu_st op: store data_reg (and data_reg2 for 64-bit
 * stores) to guest memory.  The low two bits of 'opc' give the access
 * size (0=8, 1=16, 2=32, 3=64 bits).  With CONFIG_SOFTMMU an inline TLB
 * lookup (against addr_write) is emitted, with a helper call on the slow
 * path; otherwise the store goes directly to the (GUEST_BASE-offset)
 * address.
 */
static inline void tcg_out_qemu_st(TCGContext *s, int cond,
                const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

    /* Unpack the TCG operand list: source reg(s), address reg(s), then
     * (softmmu only) the MMU index.  */
    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
                    TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    /* Load the TLB write tag and compare it against the page-aligned
     * address; from here on, EQ means "TLB hit".  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_write));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment.  */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
#  if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access.  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
#  endif
    /* r1 := host-address addend for this TLB entry; the fast-path stores
     * below are all conditional on EQ (hit).  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 2:
    default:
        tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 3:
        tcg_out_st32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
        tcg_out_st32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
        break;
    }

    /* On a hit, skip the slow-path call that follows; the branch offset
     * is patched at the end of this function.  */
    label_ptr = (void *) s->code_ptr;
    tcg_out_b(s, COND_EQ, 8);

    /* Slow path: marshal address, data (narrowed to the access size) and
     * mem_index into the calling-convention registers.  */
    /* TODO: move this code to where the constants pool will be */
    if (addr_reg != TCG_REG_R0) {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
    }
# if TARGET_LONG_BITS == 32
    switch (opc) {
    case 0:
        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_R1, data_reg, 0xff);
        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 1:
        /* Zero-extend the halfword via a shift-up/shift-down pair.  */
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R1, 0, TCG_REG_R1, SHIFT_IMM_LSR(16));
        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 2:
        if (data_reg != TCG_REG_R1) {
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 3:
        if (data_reg != TCG_REG_R1) {
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R2) {
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg2, SHIFT_IMM_LSL(0));
        }
        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    }
# else
    if (addr_reg2 != TCG_REG_R1) {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    }
    switch (opc) {
    case 0:
        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_R2, data_reg, 0xff);
        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 1:
        /* Zero-extend the halfword via a shift-up/shift-down pair.  */
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R2, 0, TCG_REG_R2, SHIFT_IMM_LSR(16));
        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 2:
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 3:
        /* r0-r3 are taken by address + 64-bit data, so pass mem_index
         * on the stack (the pre-indexed str keeps sp 16-byte aligned).  */
        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R8, 0, mem_index);
        tcg_out32(s, (cond << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R3) {
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
        }
        break;
    }
# endif

    tcg_out_bl(s, cond, (tcg_target_long) qemu_st_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);
# if TARGET_LONG_BITS == 64
    /* Pop the stack slot pushed for mem_index above.  */
    if (opc == 3)
        tcg_out_dat_imm(s, cond, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);
# endif

    /* Patch the COND_EQ branch above so it jumps past the slow path
     * (the -8 accounts for the ARM PC read-ahead).  */
    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        /* Fold the GUEST_BASE constant into the address register, one
         * rotated 8-bit immediate chunk per ADD.  */
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 2:
    default:
        tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 3:
        /* TODO: use block store -
         * check that data_reg2 > data_reg or the other way */
        tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
        break;
    }
#endif
}
1234

    
1235
/* Address of the epilogue inside the prologue buffer (set by
 * tcg_target_qemu_prologue); exit_tb branches here to return to the
 * caller of the translated code.  */
static uint8_t *tb_ret_addr;
1236

    
1237
/* Emit ARM host code for one TCG opcode.  args[] holds the operands;
 * const_args[i] nonzero means args[i] is a constant rather than a
 * register.  Unsupported opcodes abort.  */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        {
            /* Put the TB return value in r0 and jump to the epilogue.
             * Values wider than 8 bits are placed in a literal word after
             * the branch and loaded pc-relative; the ldr's offset byte is
             * back-patched once the literal's distance is known.  */
            uint8_t *ld_ptr = s->code_ptr;
            if (args[0] >> 8)
                tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            else
                tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
            if (args[0] >> 8) {
                /* -8 compensates for the ARM PC read-ahead.  */
                *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
                tcg_out32(s, args[0]);
            }
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method */
#if defined(USE_DIRECT_JUMP)
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out_b(s, COND_AL, 8);
#else
            /* Jump via an inline literal that tb_set_jmp_target rewrites.  */
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
#endif
        } else {
            /* Indirect jump method */
#if 1
            /* Load PC from tb_next[args[0]]; use a pc-relative ldr when
             * the slot is within the 12-bit offset range, otherwise
             * materialize its address in r0 first.  */
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
            if (c > 0xfff || c < -0xfff) {
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
                                (tcg_target_long) (s->tb_next + args[0]));
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            } else
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
#endif
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0])
            tcg_out_call(s, COND_AL, args[0]);
        else
            tcg_out_callr(s, COND_AL, args[0]);
        break;
    case INDEX_op_jmp:
        if (const_args[0])
            tcg_out_goto(s, COND_AL, args[0]);
        else
            tcg_out_bx(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, args[0]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mov_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
        goto gen_arith;
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_andc_i32:
        c = ARITH_BIC;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through.  */
    gen_arith:
        if (const_args[2]) {
            /* Encode the constant as an 8-bit value rotated right by
             * 2*rot (ARM modified-immediate form); the "I" constraint
             * is expected to have ensured encodability.  */
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, c,
                            args[0], args[1], rotl(args[2], rot) | (rot << 7));
        } else
            tcg_out_dat_reg(s, COND_AL, c,
                            args[0], args[1], args[2], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_add2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_sub2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_neg_i32:
        /* neg r = 0 - r, expressed as reverse-subtract from 0.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        /* A zero LSR immediate means "shift by 32" on ARM, so a shift
         * count of 0 is emitted as LSL #0 (identity) instead.  */
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        /* Fall through.  */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_brcond_i32:
        if (const_args[1]) {
            int rot;
            rot = encode_imm(args[1]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[0], rotl(args[1], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[0], args[1], SHIFT_IMM_LSL(0));
        }
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) ||  a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) ||  a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) ||  a1 >  a3,
         */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[0], args[2], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
        break;
    case INDEX_op_setcond_i32:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[1], rotl(args[2], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[1], args[2], SHIFT_IMM_LSL(0));
        }
        /* Conditionally select 1 (condition holds) or 0 (inverse).  */
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[2], args[4], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, COND_AL, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, COND_AL, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, COND_AL, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, COND_AL, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, COND_AL, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, COND_AL, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, COND_AL, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, COND_AL, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, COND_AL, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, COND_AL, args, 3);
        break;

    case INDEX_op_ext8s_i32:
        if (use_armv6_instructions) {
            /* sxtb */
            tcg_out32(s, 0xe6af0070 | (args[0] << 12) | args[1]);
        } else {
            /* Pre-v6: sign-extend via shift left then arithmetic right.  */
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            args[0], 0, args[1], SHIFT_IMM_LSL(24));
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            args[0], 0, args[0], SHIFT_IMM_ASR(24));
        }
        break;
    case INDEX_op_ext16s_i32:
        if (use_armv6_instructions) {
            /* sxth */
            tcg_out32(s, 0xe6bf0070 | (args[0] << 12) | args[1]);
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            args[0], 0, args[1], SHIFT_IMM_LSL(16));
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            args[0], 0, args[0], SHIFT_IMM_ASR(16));
        }
        break;

    default:
        tcg_abort();
    }
}
1515

    
1516
/* Operand constraints for every TCG opcode this backend implements.
 * "r" is any allocatable register; the uppercase/lowercase letters in
 * "rI"/"ri" additionally permit an immediate second operand.  The
 * qemu_ld/st letters ("x", "X", "d", "D") are backend-specific register
 * classes handled by the constraint parser -- NOTE(review): that parser
 * is outside this chunk, so their exact register sets are not visible
 * here.  The table is terminated by the { -1 } sentinel.  */
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rI" } },
    { INDEX_op_sub_i32, { "r", "r", "rI" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rI" } },
    { INDEX_op_andc_i32, { "r", "r", "rI" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rI" } },
    { INDEX_op_setcond_i32, { "r", "r", "rI" } },

    /* TODO: "r", "r", "r", "r", "ri", "ri" */
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "r", "r" } },

    /* 32-bit guest addresses take one address operand; 64-bit guests
     * need a second ("X") for the address high word.  */
#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "x" } },
    { INDEX_op_qemu_ld8s, { "r", "x" } },
    { INDEX_op_qemu_ld16u, { "r", "x" } },
    { INDEX_op_qemu_ld16s, { "r", "x" } },
    { INDEX_op_qemu_ld32, { "r", "x" } },
    { INDEX_op_qemu_ld64, { "d", "r", "x" } },

    { INDEX_op_qemu_st8, { "x", "x" } },
    { INDEX_op_qemu_st16, { "x", "x" } },
    { INDEX_op_qemu_st32, { "x", "x" } },
    { INDEX_op_qemu_st64, { "x", "D", "x" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "x", "X" } },
    { INDEX_op_qemu_ld8s, { "r", "x", "X" } },
    { INDEX_op_qemu_ld16u, { "r", "x", "X" } },
    { INDEX_op_qemu_ld16s, { "r", "x", "X" } },
    { INDEX_op_qemu_ld32, { "r", "x", "X" } },
    { INDEX_op_qemu_ld64, { "d", "r", "x", "X" } },

    { INDEX_op_qemu_st8, { "x", "x", "X" } },
    { INDEX_op_qemu_st16, { "x", "x", "X" } },
    { INDEX_op_qemu_st32, { "x", "x", "X" } },
    { INDEX_op_qemu_st64, { "x", "D", "x", "X" } },
#endif

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },

    { -1 },
};
1591

    
1592
/* One-time backend initialization: declare which host registers the
 * register allocator may use, which are call-clobbered by the AAPCS
 * (r0-r3, r12, lr), and which are reserved, then register the opcode
 * constraint table.  */
void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    /* The inline TLB arithmetic assumes a power-of-two entry size.  */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    /* All 16 core registers are nominally available for i32 values.  */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
                     (1 << TCG_REG_R1) |
                     (1 << TCG_REG_R2) |
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R12) |
                     (1 << TCG_REG_R14));

    /* Reserve sp, pc, and r8 (used as a scratch register by the
     * qemu_ld/st code above).  */
    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);

    tcg_add_target_add_op_defs(arm_op_defs);
}
1616

    
1617
/* TCG backend hook: load register 'arg' from [arg1 + arg2].  The 'type'
 * argument is ignored -- only 32-bit values exist on this backend, so
 * every load is a word load.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}
1622

    
1623
/* TCG backend hook: store register 'arg' to [arg1 + arg2].  The 'type'
 * argument is ignored -- every store is a 32-bit word store here.  */
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}
1628

    
1629
/* Add a small signed immediate to 'reg' in place.  Only offsets whose
 * magnitude fits in 8 bits are supported (the unrotated ARM immediate
 * field); anything larger aborts.  val == 0 emits nothing.  */
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val == 0) {
        return;
    }
    if (val > 0) {
        if (val >= 0x100) {
            tcg_abort();
        }
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
    } else {
        if (val <= -0x100) {
            tcg_abort();
        }
        tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
    }
}
1643

    
1644
/* TCG backend hook: register-to-register move (mov ret, arg).  */
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}
1648

    
1649
/* TCG backend hook: load the constant 'arg' into register 'ret'.  The
 * 'type' argument is ignored -- this backend only has 32-bit values.  */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                int ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
1654

    
1655
/* Generate the host prologue and epilogue.  The prologue saves the
 * callee-saved registers this backend uses and jumps to the translated
 * code whose address arrives in r0.  The instruction after that jump is
 * the epilogue: exit_tb branches to tb_ret_addr, which restores the
 * saved registers and returns (pops straight into pc).  */
void tcg_target_qemu_prologue(TCGContext *s)
{
    /* There is no need to save r7, it is used to store the address
       of the env structure and is not modified by GCC. */

    /* stmdb sp!, { r4 - r6, r8 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4f70);

    tcg_out_bx(s, COND_AL, TCG_REG_R0);
    tb_ret_addr = s->code_ptr;

    /* ldmia sp!, { r4 - r6, r8 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8f70);
}
}