tcg/sparc/tcg-target.c @ 3cf246f0
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "tcg-be-null.h"

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifdef CONFIG_USE_GUEST_BASE
# define TCG_GUEST_BASE_REG TCG_REG_I5
#else
# define TCG_GUEST_BASE_REG TCG_REG_G0
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)

#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
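
/* Editorial note, not part of the original file: each call to tcg_out32
   below emits one 32-bit instruction word assembled by OR-ing the field
   macros above together.  As a sketch, "add %o1, %o2, %o0" would be
       ARITH_ADD | INSN_RD(TCG_REG_O0) | INSN_RS1(TCG_REG_O1)
                 | INSN_RS2(TCG_REG_O2)
   which, with %o0/%o1/%o2 encoded as r8/r9/r10 per the register-name
   table above, works out to 0x9002400a.  The immediate forms instead set
   bit 13 via INSN_IMM13 and carry a 13-bit signed constant in the low
   bits. */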

static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << (sizeof(tcg_target_long) * 8 - bits)
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}

static inline int check_fit_i32(uint32_t val, unsigned int bits)
{
    return ((val << (32 - bits)) >> (32 - bits)) == val;
}
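
/* Editorial note, not part of the original file: both helpers test whether
   a value fits in a BITS-wide signed immediate by shifting it to the top
   of the word and arithmetic-shifting it back down; the round trip is
   lossless exactly when the value is representable.  For bits = 13 (the
   SPARC immediate field) the accepted range is -4096..4095, so
   check_fit_tl(4095, 13) and check_fit_tl(-4096, 13) hold while
   check_fit_tl(4096, 13) does not.  This test drives the choice between
   immediate operands and constants materialized in TCG_REG_T1/T2 below. */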

static void patch_reloc(uint8_t *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    uint32_t insn;
    value += addend;
    switch (type) {
    case R_SPARC_32:
        if (value != (uint32_t)value) {
            tcg_abort();
        }
        *(uint32_t *)code_ptr = value;
        break;
    case R_SPARC_WDISP16:
        value -= (intptr_t)code_ptr;
        if (!check_fit_tl(value >> 2, 16)) {
            tcg_abort();
        }
        insn = *(uint32_t *)code_ptr;
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(value);
        *(uint32_t *)code_ptr = insn;
        break;
    case R_SPARC_WDISP19:
        value -= (intptr_t)code_ptr;
        if (!check_fit_tl(value >> 2, 19)) {
            tcg_abort();
        }
        insn = *(uint32_t *)code_ptr;
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(value);
        *(uint32_t *)code_ptr = insn;
        break;
    default:
        tcg_abort();
    }
}
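
/* Editorial example, not part of the original file: for R_SPARC_WDISP19
   the displacement is PC-relative and word-based.  If the branch sits at
   code_ptr 0x1000 and the label resolves to 0x1080, then value becomes
   0x80 and INSN_OFF19(0x80) = (0x80 >> 2) & 0x7ffff = 0x20; the old
   offset bits are cleared with ~INSN_OFF19(-1) and the new ones OR-ed in,
   leaving the opcode, condition and prediction fields untouched. */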

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        /* Helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S13;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
                                 int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_RS2(rs2));
}

static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
                                  uint32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, int rd, int rs1,
                           int val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
}

static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static inline void tcg_out_movi_imm32(TCGContext *s, int ret, uint32_t arg)
{
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
    } else {
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
    }
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    /* All 32-bit constants, as well as 64-bit constants with
       no high bits set, go through movi_imm32.  */
    if (TCG_TARGET_REG_BITS == 32
        || type == TCG_TYPE_I32
        || (arg & ~(tcg_target_long)0xffffffff) == 0) {
        tcg_out_movi_imm32(s, ret, arg);
    } else if (check_fit_tl(arg, 13)) {
        /* A 13-bit constant sign-extended to 64 bits.  */
        tcg_out_movi_imm13(s, ret, arg);
    } else if (check_fit_tl(arg, 32)) {
        /* A 32-bit constant sign-extended to 64 bits.  */
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
    } else {
        tcg_out_movi_imm32(s, ret, arg >> (TCG_TARGET_REG_BITS / 2));
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_movi_imm32(s, TCG_REG_T2, arg);
        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
    }
}
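
/* Editorial sketch, not part of the original file: for a 64-bit constant
   with both halves populated, the final arm above emits, in the general
   case (pseudo-asm, with hi22/lo10 standing for the usual %hi/%lo splits):
       sethi hi22(arg >> 32), ret
       or    ret, lo10(arg >> 32), ret
       sllx  ret, 32, ret
       sethi hi22(arg), %o7          ! TCG_REG_T2 is %o7
       or    %o7, lo10(arg), %o7
       or    ret, %o7, ret
   The 32-bit sign-extended arm is cheaper: a SETHI of ~arg followed by an
   XOR with a negative 13-bit immediate flips the high bits back on in two
   insns. */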

static inline void tcg_out_ldst_rr(TCGContext *s, int data, int a1,
                                   int a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
                                int offset, int op)
{
    if (check_fit_tl(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
                                  tcg_target_long arg)
{
    if (!check_fit_tl(arg, 10)) {
        tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
    }
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
}

static inline void tcg_out_sety(TCGContext *s, int rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static inline void tcg_out_rdy(TCGContext *s, int rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}

static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        if (check_fit_tl(val, 13)) {
            tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, val);
            tcg_out_arith(s, reg, reg, TCG_REG_T1, ARITH_ADD);
        }
    }
}

static inline void tcg_out_andi(TCGContext *s, int rd, int rs,
                                tcg_target_long val)
{
    if (val != 0) {
        if (check_fit_tl(val, 13)) {
            tcg_out_arithi(s, rd, rs, val, ARITH_AND);
        } else {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T1, val);
            tcg_out_arith(s, rd, rs, TCG_REG_T1, ARITH_AND);
        }
    }
}

static void tcg_out_div32(TCGContext *s, int rd, int rs1,
                          int val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64 bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
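
/* Editorial note, not part of the original file: the 32-bit SDIV/UDIV
   insns divide the 64-bit quantity Y:rs1, so Y must first hold the
   sign/zero extension of rs1.  For a signed -7 / 2, the code above emits
       sra  %rs1, 31, %g1   ! TCG_REG_T1 = all sign bits (0xffffffff)
       wr   %g0, %g1, %y
       sdiv %rs1, 2, %rd    ! rd = -3 (truncating division)
   while the unsigned case simply clears Y with wr %g0, %g0, %y. */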

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_sethi(s, TCG_REG_G0, 0);
}

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, int label)
{
    TCGLabel *l = &s->labels[label];
    int off19;

    if (l->has_value) {
        off19 = INSN_OFF19(l->u.value - (unsigned long)s->code_ptr);
    } else {
        /* Make sure to preserve destinations during retranslation.  */
        off19 = *(uint32_t *)s->code_ptr & INSN_OFF19(-1);
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGArg arg1,
                               TCGArg arg2, int const_arg2, int label)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, label);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGArg ret,
                          TCGArg v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const,
                                TCGArg v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGArg arg1,
                               TCGArg arg2, int const_arg2, int label)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        TCGLabel *l = &s->labels[label];
        int off16;

        if (l->has_value) {
            off16 = INSN_OFF16(l->u.value - (unsigned long)s->code_ptr);
        } else {
            /* Make sure to preserve destinations during retranslation.  */
            off16 = *(uint32_t *)s->code_ptr & INSN_OFF16(-1);
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, label, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, label);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGArg ret, TCGArg c1,
                         TCGArg v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const,
                                TCGArg v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_tl(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}
#else
static void tcg_out_brcond2_i32(TCGContext *s, TCGCond cond,
                                TCGArg al, TCGArg ah,
                                TCGArg bl, int blconst,
                                TCGArg bh, int bhconst, int label_dest)
{
    int scond, label_next = gen_new_label();

    tcg_out_cmp(s, ah, bh, bhconst);

    /* Note that we fill one of the delay slots with the second compare.  */
    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        tcg_out_bpcc(s, COND_E, BPCC_ICC | BPCC_PT, label_dest);
        break;

    case TCG_COND_NE:
        tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_dest);
        tcg_out_cmp(s, al, bl, blconst);
        tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_dest);
        break;

    default:
        scond = tcg_cond_to_bcond[tcg_high_cond(cond)];
        tcg_out_bpcc(s, scond, BPCC_ICC | BPCC_PT, label_dest);
        tcg_out_nop(s);
        tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        scond = tcg_cond_to_bcond[tcg_unsigned_cond(cond)];
        tcg_out_bpcc(s, scond, BPCC_ICC | BPCC_PT, label_dest);
        break;
    }
    tcg_out_nop(s);

    tcg_out_label(s, label_next, s->code_ptr);
}
#endif

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDX/SUBX.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
        }
        c1 = TCG_REG_G0, c2 = ret, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGArg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
    }
}
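
/* Editorial example, not part of the original file: the ADDX/SUBX games
   read the carry left behind by SUBCC.  For setcond LTU the tail above is
       subcc %c1, %c2, %g0   ! carry set iff c1 < c2 unsigned
       addx  %g0, 0, %ret    ! ret = 0 + 0 + carry
   and for GEU
       subcc %c1, %c2, %g0
       subx  %g0, -1, %ret   ! ret = 0 - (-1) - carry = !carry
   EQ/NE first collapse to a comparison of (c1 ^ c2) against zero so the
   same carry trick applies. */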

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}
#else
static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                 TCGArg al, TCGArg ah,
                                 TCGArg bl, int blconst,
                                 TCGArg bh, int bhconst)
{
    int tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (ret != ah && (bhconst || ret != bh)) {
        tmp = ret;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (bl == 0 && bh == 0) {
            if (cond == TCG_COND_EQ) {
                tcg_out_arith(s, TCG_REG_G0, al, ah, ARITH_ORCC);
                tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
            } else {
                tcg_out_arith(s, ret, al, ah, ARITH_ORCC);
            }
        } else {
            tcg_out_setcond_i32(s, cond, tmp, al, bl, blconst);
            tcg_out_cmp(s, ah, bh, bhconst);
            tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
        }
        tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, cond == TCG_COND_NE, 1);
        break;

    default:
        /* <= : ah < bh | (ah == bh && al <= bl) */
        tcg_out_setcond_i32(s, tcg_unsigned_cond(cond), tmp, al, bl, blconst);
        tcg_out_cmp(s, ah, bh, bhconst);
        tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
        tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, 0, 1);
        tcg_out_movcc(s, tcg_high_cond(cond), MOVCC_ICC, ret, 1, 1);
        break;
    }
}
#endif

static void tcg_out_addsub2(TCGContext *s, TCGArg rl, TCGArg rh,
                            TCGArg al, TCGArg ah, TCGArg bl, int blconst,
                            TCGArg bh, int bhconst, int opl, int oph)
{
    TCGArg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}
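
/* Editorial note, not part of the original file: for add2_i32 the pair
   (opl, oph) is (ARITH_ADDCC, ARITH_ADDX), i.e.
       addcc %al, %bl, %rl   ! low words, sets the carry
       addx  %ah, %bh, %rh   ! high words plus carry
   TCG_REG_T1 stands in for RL whenever writing RL directly would clobber
   AH or BH before the high-part op consumes them. */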

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /* The TCG temp buffer is at the top of the frame, immediately
       below the frame pointer.  */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
                  tmp_buf_size);

    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
       otherwise the minimal frame usable by callees.  */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I1) |
              INSN_RS2(TCG_REG_G0));
    /* delay slot */
    tcg_out_nop(s);

    /* No epilogue required.  We issue ret + restore directly in the TB.  */
}
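
/* Editorial example, not part of the original file, with illustrative
   values only (the real constants live in tcg-target.h and elsewhere):
   suppose CPU_TEMP_BUF_NLONGS = 128 on a 64-bit host, so tmp_buf_size is
   1024, and suppose the two call-frame terms sum to 192.  Then frame_size
   is 192 + 1024 = 1216, rounded up to TCG_TARGET_STACK_ALIGN, and the
   whole prologue is a single
       save %sp, -1216, %sp
   after which the incoming arguments appear as %i0 (env) and %i1 (the
   translated-code address jumped to above). */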

#if defined(CONFIG_SOFTMMU)

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO_IDX contains the index into ARGS of the low part of the
   address; the high part of the address is at ADDRLO_IDX+1.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static int tcg_out_tlb_load(TCGContext *s, int addrlo_idx, int mem_index,
                            int s_bits, const TCGArg *args, int which)
{
    const int addrlo = args[addrlo_idx];
    const int r0 = TCG_REG_O0;
    const int r1 = TCG_REG_O1;
    const int r2 = TCG_REG_O2;
    int addr = addrlo;
    int tlb_ofs;

    if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
        /* Assemble the 64-bit address in R0.  */
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        tcg_out_arithi(s, r1, args[addrlo_idx + 1], 32, SHIFT_SLLX);
        tcg_out_arith(s, r0, r0, r1, ARITH_OR);
    }

    /* Shift the page number down to tlb-entry.  */
    tcg_out_arithi(s, r1, addrlo,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment.  */
    tcg_out_andi(s, r0, addr, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* Compute tlb index, modulo tlb size.  */
    tcg_out_andi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* Relative to the current ENV.  */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend.  */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_tl(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        tcg_out_addi(s, r1, tlb_ofs);
        tlb_ofs = 0;
    }

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs + offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        return r0;
    }
    return addrlo;
}
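
/* Editorial example, not part of the original file, using illustrative
   parameters (say TARGET_PAGE_BITS = 12, CPU_TLB_ENTRY_BITS = 5,
   CPU_TLB_SIZE = 256, all defined by the target headers): the index
   computation above becomes
       srl %addr, 7, %o1         ! page number scaled by entry size
       and %o1, 255 << 5, %o1    ! index * sizeof entry, modulo TLB size
       add %env, %o1, %o1        ! &env->tlb_table[mem_index][index]
   Meanwhile %o0 holds the address with the in-page offset cleared except
   for the low s_bits alignment bits, so a misaligned access fails the
   comparator check against %o2 and takes the slow path. */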
#endif /* CONFIG_SOFTMMU */

static const int qemu_ld_opc[8] = {
#ifdef TARGET_WORDS_BIGENDIAN
    LDUB, LDUH, LDUW, LDX, LDSB, LDSH, LDSW, LDX
#else
    LDUB, LDUH_LE, LDUW_LE, LDX_LE, LDSB, LDSH_LE, LDSW_LE, LDX_LE
#endif
};

static const int qemu_st_opc[4] = {
#ifdef TARGET_WORDS_BIGENDIAN
    STB, STH, STW, STX
#else
    STB, STH_LE, STW_LE, STX_LE
#endif
};

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int sizeop)
{
    int addrlo_idx = 1, datalo, datahi, addr_reg;
#if defined(CONFIG_SOFTMMU)
    int memi_idx, memi, s_bits, n;
    uint32_t *label_ptr[2];
#endif

    datahi = datalo = args[0];
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        datahi = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
    memi = args[memi_idx];
    s_bits = sizeop & 3;

    addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, s_bits, args,
                                offsetof(CPUTLBEntry, addr_read));

    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        int reg64;

        /* bne,pn %[xi]cc, label0 */
        label_ptr[0] = (uint32_t *)s->code_ptr;
        tcg_out_bpcc0(s, COND_NE, BPCC_PN
                      | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);

        /* TLB Hit.  */
        /* Load all 64-bits into an O/G register.  */
        reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
        tcg_out_ldst_rr(s, reg64, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);

        /* Move the two 32-bit pieces into the destination registers.  */
        tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
        if (reg64 != datalo) {
            tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
        }

        /* b,a,pt label1 */
        label_ptr[1] = (uint32_t *)s->code_ptr;
        tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
    } else {
        /* The fast path is exactly one insn.  Thus we can perform the
           entire TLB Hit in the (annulled) delay slot of the branch
           over the TLB Miss case.  */

        /* beq,a,pt %[xi]cc, label0 */
        label_ptr[0] = NULL;
        label_ptr[1] = (uint32_t *)s->code_ptr;
        tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                      | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
        /* delay slot */
        tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);
    }

    /* TLB Miss.  */

    if (label_ptr[0]) {
        *label_ptr[0] |= INSN_OFF19((unsigned long)s->code_ptr -
                                    (unsigned long)label_ptr[0]);
    }
    n = 0;
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                args[addrlo_idx]);

    /* qemu_ld_helper[s_bits](arg0, arg1) */
    tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
                           - (tcg_target_ulong)s->code_ptr) >> 2)
                         & 0x3fffffff));
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[n], memi);

    n = tcg_target_call_oarg_regs[0];
    /* datalo = sign_extend(arg0) */
    switch (sizeop) {
    case 0 | 4:
        /* Recall that SRA sign extends from bit 31 through bit 63.  */
        tcg_out_arithi(s, datalo, n, 24, SHIFT_SLL);
        tcg_out_arithi(s, datalo, datalo, 24, SHIFT_SRA);
        break;
    case 1 | 4:
        tcg_out_arithi(s, datalo, n, 16, SHIFT_SLL);
        tcg_out_arithi(s, datalo, datalo, 16, SHIFT_SRA);
        break;
    case 2 | 4:
        tcg_out_arithi(s, datalo, n, 0, SHIFT_SRA);
        break;
    case 3:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_mov(s, TCG_TYPE_REG, datahi, n);
            tcg_out_mov(s, TCG_TYPE_REG, datalo, n + 1);
            break;
        }
        /* FALLTHRU */
    case 0:
    case 1:
    case 2:
    default:
        /* mov */
        tcg_out_mov(s, TCG_TYPE_REG, datalo, n);
        break;
    }

    *label_ptr[1] |= INSN_OFF19((unsigned long)s->code_ptr -
                                (unsigned long)label_ptr[1]);
#else
    addr_reg = args[addrlo_idx];
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
        addr_reg = TCG_REG_T1;
    }
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        int reg64 = (datalo < 16 ? datalo : TCG_REG_O0);

        tcg_out_ldst_rr(s, reg64, addr_reg,
                        (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                        qemu_ld_opc[sizeop]);

        tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
        if (reg64 != datalo) {
            tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
        }
    } else {
        tcg_out_ldst_rr(s, datalo, addr_reg,
                        (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                        qemu_ld_opc[sizeop]);
    }
#endif /* CONFIG_SOFTMMU */
}
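
/* Editorial note, not part of the original file: in the common softmmu
   case the fast path above is one load in the annulled delay slot of the
   branch, roughly
       subcc  %o0, %o2, %g0           ! from tcg_out_tlb_load
       beq,a,pt %xcc, hit
        ldx   [%addr + %o1], %datalo  ! executed only when the branch is taken
   so a TLB hit costs the compare, the branch and the load, while a miss
   annuls the load and falls straight through into the helper-call
   sequence, whose branch target is patched in afterwards via
   INSN_OFF19. */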

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
{
    int addrlo_idx = 1, datalo, datahi, addr_reg;
#if defined(CONFIG_SOFTMMU)
    int memi_idx, memi, n, datafull;
    uint32_t *label_ptr;
#endif

    datahi = datalo = args[0];
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        datahi = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
    memi = args[memi_idx];

    addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, sizeop, args,
                                offsetof(CPUTLBEntry, addr_write));

    datafull = datalo;
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        /* Reconstruct the full 64-bit value.  */
        tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
        tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
        tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
        datafull = TCG_REG_O2;
    }

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = (uint32_t *)s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, datafull, addr_reg, TCG_REG_O1, qemu_st_opc[sizeop]);

    /* TLB Miss.  */

    n = 0;
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                args[addrlo_idx]);
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datahi);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datalo);

    /* qemu_st_helper[sizeop](arg0, arg1, arg2) */
    tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[sizeop]
                           - (tcg_target_ulong)s->code_ptr) >> 2)
                         & 0x3fffffff));
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n], memi);

    *label_ptr |= INSN_OFF19((unsigned long)s->code_ptr -
                             (unsigned long)label_ptr);
#else
    addr_reg = args[addrlo_idx];
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
        addr_reg = TCG_REG_T1;
    }
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
        tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
        tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
        datalo = TCG_REG_O2;
    }
    tcg_out_ldst_rr(s, datalo, addr_reg,
                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[sizeop]);
#endif /* CONFIG_SOFTMMU */
}

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
        tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
                  INSN_IMM13(8));
        tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
                  INSN_RS2(TCG_REG_G0));
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            uint32_t old_insn = *(uint32_t *)s->code_ptr;
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            /* Make sure to preserve links during retranslation.  */
            tcg_out32(s, CALL | (old_insn & ~INSN_OP(-1)));
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_T1,
                           (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_T1) |
                      INSN_RS2(TCG_REG_G0));
        }
        tcg_out_nop(s);
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
                                   - (tcg_target_ulong)s->code_ptr) >> 2)
                                 & 0x3fffffff));
        } else {
            tcg_out_ld_ptr(s, TCG_REG_T1,
                           (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_T1) |
                      INSN_RS2(TCG_REG_G0));
        }
        /* delay slot */
        tcg_out_nop(s);
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, args[0]);
        tcg_out_nop(s);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

#if TCG_TARGET_REG_BITS == 64
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
#else
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32)
#endif
    OP_32_64(ld8u):
        tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
        break;
    case INDEX_op_ld_i32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_ld32u_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, args[0], args[1], args[2], STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, args[0], args[1], args[2], STH);
        break;
    case INDEX_op_st_i32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_st32_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, args[0], args[1], args[2] & 31, const_args[2], c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
        break;

    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        tcg_out_div32(s, TCG_REG_T1, args[1], args[2], const_args[2],
                      opc == INDEX_op_remu_i32);
        tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
                       ARITH_UMUL);
        tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
                           args[3]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], args[0], args[1],
                            args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], args[0], args[1],
                            args[2], const_args[2], args[3], const_args[3]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2_i32(s, args[4], args[0], args[1],
                            args[2], const_args[2],
                            args[3], const_args[3], args[5]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2_i32(s, args[5], args[0], args[1], args[2],
                             args[3], const_args[3],
                             args[4], const_args[4]);
        break;
#endif

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
                        args[4], const_args[4], args[5], const_args[5],
                        ARITH_ADDCC, ARITH_ADDX);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
                        args[4], const_args[4], args[5], const_args[5],
                        ARITH_SUBCC, ARITH_SUBX);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_arithc(s, args[0], args[2], args[3], const_args[3],
                       ARITH_UMUL);
        tcg_out_rdy(s, args[1]);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32u:
#endif
        tcg_out_qemu_ld(s, args, 2);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32s:
        tcg_out_qemu_ld(s, args, 2 | 4);
        break;
#endif
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;
    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, args[0], args[1], args[2] & 63, const_args[2], c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        tcg_out_arithc(s, TCG_REG_T1, args[1], args[2], const_args[2],
                       opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX);
        tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
                       ARITH_MULX);
        tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
        break;
    case INDEX_op_ext32s_i64:
        if (const_args[1]) {
            tcg_out_movi(s, TCG_TYPE_I64, args[0], (int32_t)args[1]);
        } else {
            tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
        }
        break;
    case INDEX_op_ext32u_i64:
        if (const_args[1]) {
            tcg_out_movi_imm32(s, args[0], args[1]);
        } else {
            tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
        }
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1],
                           args[3]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], args[0], args[1],
                            args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], args[0], args[1],
                            args[2], const_args[2], args[3], const_args[3]);
        break;
#endif
    gen_arith:
        tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c);
        break;

    gen_arith1:
        tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}
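
/* Editorial note, not part of the original file: SPARC has no remainder
   instruction, so the rem/remu cases above compute x - (x / y) * y; for
   rem_i64 that is
       sdivx %x, %y, %g1
       mulx  %g1, %y, %g1
       sub   %x, %g1, %rd
   with TCG_REG_T1 (%g1) as the scratch, and the 32-bit variant routes
   through tcg_out_div32 for the Y-register setup. */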
1470

    
1471
static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_rem_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_remu_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_neg_i32, { "r", "rJ" } },
    { INDEX_op_not_i32, { "r", "rJ" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },

#if TCG_TARGET_REG_BITS == 32
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rJ", "rJ" } },
#endif

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "rZ", "r" } },
    { INDEX_op_st16_i64, { "rZ", "r" } },
    { INDEX_op_st32_i64, { "rZ", "r" } },
    { INDEX_op_st_i64, { "rZ", "r" } },

    { INDEX_op_add_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_div_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_divu_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_rem_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_remu_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_sub_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_and_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_andc_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_or_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_orc_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_xor_i64, { "r", "rZ", "rJ" } },

    { INDEX_op_shl_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_shr_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_sar_i64, { "r", "rZ", "rJ" } },

    { INDEX_op_neg_i64, { "r", "rJ" } },
    { INDEX_op_not_i64, { "r", "rJ" } },

    { INDEX_op_ext32s_i64, { "r", "ri" } },
    { INDEX_op_ext32u_i64, { "r", "ri" } },

    { INDEX_op_brcond_i64, { "rZ", "rJ" } },
    { INDEX_op_setcond_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_movcond_i64, { "r", "rZ", "rJ", "rI", "0" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "L", "L", "L", "L" } },

    { INDEX_op_qemu_st8, { "L", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif

    { -1 },
};
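/* Tell the common TCG code which registers exist, which are
   call-clobbered, and which are reserved, then register the opcode
   constraint table above.  */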
static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
#if TCG_TARGET_REG_BITS == 64
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
#endif
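    /* Everything outside the register window (%g and %o) is clobbered
       by a call; %o6 is omitted here only because it is reserved
       below as the stack pointer.  */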
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_G1) |
                     (1 << TCG_REG_G2) |
                     (1 << TCG_REG_G3) |
                     (1 << TCG_REG_G4) |
                     (1 << TCG_REG_G5) |
                     (1 << TCG_REG_G6) |
                     (1 << TCG_REG_G7) |
                     (1 << TCG_REG_O0) |
                     (1 << TCG_REG_O1) |
                     (1 << TCG_REG_O2) |
                     (1 << TCG_REG_O3) |
                     (1 << TCG_REG_O4) |
                     (1 << TCG_REG_O5) |
                     (1 << TCG_REG_O7));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */

    tcg_add_target_add_op_defs(sparc_op_defs);
}
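/* ELF machine type for the in-memory image that the common code
   registers with GDB's JIT interface.  */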
#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE  EM_SPARCV9
#else
# define ELF_HOST_MACHINE  EM_SPARC32PLUS
# define ELF_HOST_FLAGS    EF_SPARC_32PLUS
#endif
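/* DWARF unwind info: one CIE plus a single FDE that will cover the
   entire code_gen_buffer.  */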
typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[TCG_TARGET_REG_BITS == 64 ? 4 : 2];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = -sizeof(void *) & 0x7f,
    .cie.return_column = 15,            /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
#if TCG_TARGET_REG_BITS == 64
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
#else
        13, 30                          /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};
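/* Called once the code_gen_buffer has been allocated: fill in the
   FDE's code range and hand the unwind info to the common code.  */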
void tcg_register_jit(void *buf, size_t buf_size)
{
    debug_frame.fde.func_start = (tcg_target_long) buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
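/* Patch a direct-jump slot in a translated block: rewrite the word at
   jmp_addr with a CALL to addr.  CALL encodes a signed 30-bit word
   displacement; e.g. addr - jmp_addr == 0x1000 gives disp 0x400 and
   the instruction word 0x40000400.  */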
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    uint32_t *ptr = (uint32_t *)jmp_addr;
    tcg_target_long disp = (tcg_target_long)(addr - jmp_addr) >> 2;

    /* We can reach the entire address space for 32-bit.  For 64-bit
       the code_gen_buffer can't be larger than 2GB.  */
    if (TCG_TARGET_REG_BITS == 64 && !check_fit_tl(disp, 30)) {
        tcg_abort();
    }

    *ptr = CALL | (disp & 0x3fffffff);
    flush_icache_range(jmp_addr, jmp_addr + 4);
}