root / tcg / sparc / tcg-target.c @ 375816f8

/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifdef CONFIG_USE_GUEST_BASE
# define TCG_GUEST_BASE_REG TCG_REG_I5
#else
# define TCG_GUEST_BASE_REG TCG_REG_G0
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,
    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};

static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << ((sizeof(tcg_target_long) * 8 - bits))
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}

static inline int check_fit_i32(uint32_t val, unsigned int bits)
{
    return ((val << (32 - bits)) >> (32 - bits)) == val;
}
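
/* Added note (not in the original source): check_fit_tl() tests whether
   VAL is representable as a signed BITS-wide immediate by shifting the
   value up and arithmetically back down; the round trip preserves VAL
   exactly when it fits.  For example, with bits = 13 on a 64-bit host,
   4095 survives the round trip but 4096 comes back as -4096, so only
   the former may use the IMM13 instruction forms below.  */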

static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch (type) {
    case R_SPARC_32:
        if (value != (uint32_t)value)
            tcg_abort();
        *(uint32_t *)code_ptr = value;
        break;
    case R_SPARC_WDISP22:
        value -= (long)code_ptr;
        value >>= 2;
        if (!check_fit_tl(value, 22))
            tcg_abort();
        *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x3fffff) | value;
        break;
    case R_SPARC_WDISP19:
        value -= (long)code_ptr;
        value >>= 2;
        if (!check_fit_tl(value, 19))
            tcg_abort();
        *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x7ffff) | value;
        break;
    default:
        tcg_abort();
    }
}
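
/* Added note (not in the original source): the WDISP19/WDISP22 cases
   patch a PC-relative branch.  The displacement is measured in words
   from the branch instruction itself, so a target 64 bytes ahead of
   code_ptr yields 64 >> 2 = 16, OR-ed into the low displacement field
   of the already-emitted opcode.  */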

/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 6;
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        /* qemu_ld/st helper arguments */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S13;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;

    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11))
        return 1;
    else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13))
        return 1;
    else
        return 0;
}

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_OFF22(x) (((x) >> 2) & 0x3fffff)

#define INSN_COND(x, a) (((x) << 25) | ((a) << 29))
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2))

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
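
/* Added sketch (not in the original source): the field macros above
   assemble one 32-bit instruction word by OR-ing fields together.
   For example, "add %i1, %i2, %o0" would be emitted as

       tcg_out32(s, ARITH_ADD | INSN_RD(TCG_REG_O0)
                    | INSN_RS1(TCG_REG_I1) | INSN_RS2(TCG_REG_I2));

   while the immediate form "add %i1, 100, %o0" replaces INSN_RS2 with
   INSN_IMM13(100), which also sets the i-bit (1 << 13) selecting the
   immediate encoding.  */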

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
                                 int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_RS2(rs2));
}

static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
                                  uint32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, int rd, int rs1,
                           int val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
}

static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static inline void tcg_out_movi_imm32(TCGContext *s, int ret, uint32_t arg)
{
    if (check_fit_tl(arg, 13))
        tcg_out_movi_imm13(s, ret, arg);
    else {
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff)
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
    }
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    /* All 32-bit constants, as well as 64-bit constants with
       no high bits set, go through movi_imm32.  */
    if (TCG_TARGET_REG_BITS == 32
        || type == TCG_TYPE_I32
        || (arg & ~(tcg_target_long)0xffffffff) == 0) {
        tcg_out_movi_imm32(s, ret, arg);
    } else if (check_fit_tl(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits.  */
        tcg_out_movi_imm13(s, ret, arg);
    } else if (check_fit_tl(arg, 32)) {
        /* A 32-bit constant sign-extended to 64-bits.  */
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
    } else {
        tcg_out_movi_imm32(s, ret, arg >> (TCG_TARGET_REG_BITS / 2));
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_movi_imm32(s, TCG_REG_T2, arg);
        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
    }
}
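
/* Added sketch (not in the original source): for a full 64-bit constant
   such as 0x12345678deadbeef none of the earlier cases match, so the
   final branch emits up to six instructions, with %lo10 below denoting
   the low 10 bits that sethi does not cover:

       sethi %hi(0x12345678), ret
       or    ret, %lo10(0x12345678), ret
       sllx  ret, 32, ret
       sethi %hi(0xdeadbeef), %o7       ! TCG_REG_T2 as scratch
       or    %o7, %lo10(0xdeadbeef), %o7
       or    ret, %o7, ret
*/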

static inline void tcg_out_ldst_rr(TCGContext *s, int data, int a1,
                                   int a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
                                int offset, int op)
{
    if (check_fit_tl(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
                                  tcg_target_long arg)
{
    if (!check_fit_tl(arg, 10)) {
        tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
    }
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
}

static inline void tcg_out_sety(TCGContext *s, int rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static inline void tcg_out_rdy(TCGContext *s, int rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}

static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        if (check_fit_tl(val, 13))
            tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
        else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, val);
            tcg_out_arith(s, reg, reg, TCG_REG_T1, ARITH_ADD);
        }
    }
}

static inline void tcg_out_andi(TCGContext *s, int rd, int rs,
                                tcg_target_long val)
{
    if (val != 0) {
        if (check_fit_tl(val, 13))
            tcg_out_arithi(s, rd, rs, val, ARITH_AND);
        else {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T1, val);
            tcg_out_arith(s, rd, rs, TCG_REG_T1, ARITH_AND);
        }
    }
}

static void tcg_out_div32(TCGContext *s, int rd, int rs1,
                          int val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
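
/* Added note (not in the original source): the 32-bit UDIV/SDIV insns
   divide the 64-bit quantity Y:rs1 by the divisor, which is why Y must
   first hold zero (unsigned) or the sign word of RS1 produced by
   "sra rs1, 31" (signed) before the divide issues.  */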

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_sethi(s, TCG_REG_G0, 0);
}

static void tcg_out_branch_i32(TCGContext *s, int opc, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        tcg_out32(s, (INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x2)
                      | INSN_OFF22(l->u.value - (unsigned long)s->code_ptr)));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP22, label_index, 0);
        tcg_out32(s, (INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x2) | 0));
    }
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_branch_i64(TCGContext *s, int opc, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        tcg_out32(s, (INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x1) |
                      (0x5 << 19) |
                      INSN_OFF19(l->u.value - (unsigned long)s->code_ptr)));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label_index, 0);
        tcg_out32(s, (INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x1) |
                      (0x5 << 19) | 0));
    }
}
#endif

static const uint8_t tcg_cond_to_bcond[10] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static void tcg_out_cmp(TCGContext *s, TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond,
                               TCGArg arg1, TCGArg arg2, int const_arg2,
                               int label_index)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_branch_i32(s, tcg_cond_to_bcond[cond], label_index);
    tcg_out_nop(s);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond,
                               TCGArg arg1, TCGArg arg2, int const_arg2,
                               int label_index)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_branch_i64(s, tcg_cond_to_bcond[cond], label_index);
    tcg_out_nop(s);
}
#else
static void tcg_out_brcond2_i32(TCGContext *s, TCGCond cond,
                                TCGArg al, TCGArg ah,
                                TCGArg bl, int blconst,
                                TCGArg bh, int bhconst, int label_dest)
{
    int cc, label_next = gen_new_label();

    tcg_out_cmp(s, ah, bh, bhconst);

    /* Note that we fill one of the delay slots with the second compare.  */
    switch (cond) {
    case TCG_COND_EQ:
        cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
        tcg_out_branch_i32(s, cc, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_EQ], 0);
        tcg_out_branch_i32(s, cc, label_dest);
        break;

    case TCG_COND_NE:
        cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
        tcg_out_branch_i32(s, cc, label_dest);
        tcg_out_cmp(s, al, bl, blconst);
        tcg_out_branch_i32(s, cc, label_dest);
        break;

    default:
        /* ??? One could fairly easily special-case 64-bit unsigned
           compares against 32-bit zero-extended constants.  For instance,
           we know that (unsigned)AH < 0 is false and need not emit it.
           Similarly, (unsigned)AH > 0 being true implies AH != 0, so the
           second branch will never be taken.  */
        cc = INSN_COND(tcg_cond_to_bcond[cond], 0);
        tcg_out_branch_i32(s, cc, label_dest);
        tcg_out_nop(s);
        cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
        tcg_out_branch_i32(s, cc, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        cc = INSN_COND(tcg_cond_to_bcond[tcg_unsigned_cond(cond)], 0);
        tcg_out_branch_i32(s, cc, label_dest);
        break;
    }
    tcg_out_nop(s);

    tcg_out_label(s, label_next, s->code_ptr);
}
#endif

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    TCGArg t;

    /* For 32-bit comparisons, we can play games with ADDX/SUBX.  */
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (c2 != 0) {
            tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
        }
        c1 = TCG_REG_G0, c2 = ret, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_LEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_GEU:
        if (c2const && c2 != 0) {
            tcg_out_movi_imm13(s, TCG_REG_T1, c2);
            c2 = TCG_REG_T1;
        }
        t = c1, c1 = c2, c2 = t, c2const = 0;
        cond = tcg_swap_cond(cond);
        break;

    case TCG_COND_LTU:
    case TCG_COND_LEU:
        break;

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out32(s, ARITH_MOVCC | INSN_RD(ret)
                  | INSN_RS1(tcg_cond_to_bcond[cond])
                  | MOVCC_ICC | INSN_IMM11(1));
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
    }
}
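
/* Added note (not in the original source): the ADDX/SUBX trick relies
   on SUBCC leaving the unsigned borrow of c1 - c2 in the carry flag.
   "addx %g0, 0, ret" then materializes the carry directly, so for
   TCG_COND_LTU the result is exactly (c1 < c2); "subx %g0, -1, ret"
   computes 1 - C, the complemented test used by the EQ/NE
   transformation above (after XOR, carry is set iff the operands
   differed).  */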

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movi_imm13(s, ret, 0);
    tcg_out32 (s, ARITH_MOVCC | INSN_RD(ret)
               | INSN_RS1(tcg_cond_to_bcond[cond])
               | MOVCC_XCC | INSN_IMM11(1));
}
#else
static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                 TCGArg al, TCGArg ah,
                                 TCGArg bl, int blconst,
                                 TCGArg bh, int bhconst)
{
    int lab;

    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_setcond_i32(s, TCG_COND_EQ, TCG_REG_T1, al, bl, blconst);
        tcg_out_setcond_i32(s, TCG_COND_EQ, ret, ah, bh, bhconst);
        tcg_out_arith(s, ret, ret, TCG_REG_T1, ARITH_AND);
        break;

    case TCG_COND_NE:
        tcg_out_setcond_i32(s, TCG_COND_NE, TCG_REG_T1, al, bl, blconst);
        tcg_out_setcond_i32(s, TCG_COND_NE, ret, ah, bh, bhconst);
        tcg_out_arith(s, ret, ret, TCG_REG_T1, ARITH_OR);
        break;

    default:
        lab = gen_new_label();

        tcg_out_cmp(s, ah, bh, bhconst);
        tcg_out_branch_i32(s, INSN_COND(tcg_cond_to_bcond[cond], 1), lab);
        tcg_out_movi_imm13(s, ret, 1);
        tcg_out_branch_i32(s, INSN_COND(COND_NE, 1), lab);
        tcg_out_movi_imm13(s, ret, 0);

        tcg_out_setcond_i32(s, tcg_unsigned_cond(cond), ret, al, bl, blconst);

        tcg_out_label(s, lab, s->code_ptr);
        break;
    }
}
#endif

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /* The TCG temp buffer is at the top of the frame, immediately
       below the frame pointer.  */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
                  tmp_buf_size);

    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
       otherwise the minimal frame usable by callees.  */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I1) |
              INSN_RS2(TCG_REG_G0));
    /* delay slot */
    tcg_out_nop(s);

    /* No epilogue required.  We issue ret + restore directly in the TB.  */
}
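
/* Added note (not in the original source): SAVE both allocates the frame
   and rotates the register window, so the TB pointer the caller passed
   in %o1 is visible as %i1 after the SAVE; hence the jmpl through
   TCG_REG_I1 above.  The exit_tb case later undoes this with
   "jmpl %i7+8; restore".  */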

#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO_IDX contains the index into ARGS of the low part of the
   address; the high part of the address is at ADDRLO_IDX+1.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static int tcg_out_tlb_load(TCGContext *s, int addrlo_idx, int mem_index,
                            int s_bits, const TCGArg *args, int which)
{
    const int addrlo = args[addrlo_idx];
    const int r0 = TCG_REG_O0;
    const int r1 = TCG_REG_O1;
    const int r2 = TCG_REG_O2;
    int addr = addrlo;
    int tlb_ofs;

    if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
        /* Assemble the 64-bit address in R0.  */
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        tcg_out_arithi(s, r1, args[addrlo_idx + 1], 32, SHIFT_SLLX);
        tcg_out_arith(s, r0, r0, r1, ARITH_OR);
    }

    /* Shift the page number down to tlb-entry.  */
    tcg_out_arithi(s, r1, addrlo,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment.  */
    tcg_out_andi(s, r0, addr, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* Compute tlb index, modulo tlb size.  */
    tcg_out_andi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* Relative to the current ENV.  */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend.  */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_tl(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        tcg_out_addi(s, r1, tlb_ofs);
        tlb_ofs = 0;
    }

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        return r0;
    }
    return addrlo;
}
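
/* Added worked example (not in the original source; assumes the common
   configuration TARGET_PAGE_BITS = 12 and a power-of-two CPU_TLB_SIZE):
   the shift-and-mask pair above computes

       r1 = (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
            & ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS)

   i.e. the byte offset of the TLB entry indexed by ADDR's page number,
   while r0 keeps the page-aligned address (plus required alignment
   bits) that must equal the entry's comparator for a TLB hit.  */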
#endif /* CONFIG_SOFTMMU */

static const int qemu_ld_opc[8] = {
#ifdef TARGET_WORDS_BIGENDIAN
    LDUB, LDUH, LDUW, LDX, LDSB, LDSH, LDSW, LDX
#else
    LDUB, LDUH_LE, LDUW_LE, LDX_LE, LDSB, LDSH_LE, LDSW_LE, LDX_LE
#endif
};

static const int qemu_st_opc[4] = {
#ifdef TARGET_WORDS_BIGENDIAN
    STB, STH, STW, STX
#else
    STB, STH_LE, STW_LE, STX_LE
#endif
};

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int sizeop)
{
    int addrlo_idx = 1, datalo, datahi, addr_reg;
#if defined(CONFIG_SOFTMMU)
    int memi_idx, memi, s_bits, n;
    uint32_t *label_ptr[2];
#endif

    datahi = datalo = args[0];
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        datahi = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
    memi = args[memi_idx];
    s_bits = sizeop & 3;

    addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, s_bits, args,
                                offsetof(CPUTLBEntry, addr_read));

    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        int reg64;

        /* bne,pn %[xi]cc, label0 */
        label_ptr[0] = (uint32_t *)s->code_ptr;
        tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_NE, 0) | INSN_OP2(0x1)
                      | ((TARGET_LONG_BITS == 64) << 21)));

        /* TLB Hit.  */
        /* Load all 64-bits into an O/G register.  */
        reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
        tcg_out_ldst_rr(s, reg64, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);

        /* Move the two 32-bit pieces into the destination registers.  */
        tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
        if (reg64 != datalo) {
            tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
        }

        /* b,a,pt label1 */
        label_ptr[1] = (uint32_t *)s->code_ptr;
        tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x1)
                      | (1 << 29) | (1 << 19)));
    } else {
        /* The fast path is exactly one insn.  Thus we can perform the
           entire TLB Hit in the (annulled) delay slot of the branch
           over the TLB Miss case.  */

        /* beq,a,pt %[xi]cc, label0 */
        label_ptr[0] = NULL;
        label_ptr[1] = (uint32_t *)s->code_ptr;
        tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1)
                      | ((TARGET_LONG_BITS == 64) << 21)
                      | (1 << 29) | (1 << 19)));
        /* delay slot */
        tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);
    }

    /* TLB Miss.  */

    if (label_ptr[0]) {
        *label_ptr[0] |= INSN_OFF19((unsigned long)s->code_ptr -
                                    (unsigned long)label_ptr[0]);
    }
    n = 0;
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                args[addrlo_idx]);

    /* qemu_ld_helpers[s_bits](env, addr, mmu_idx) */
    tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
                           - (tcg_target_ulong)s->code_ptr) >> 2)
                         & 0x3fffffff));
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[n], memi);

    n = tcg_target_call_oarg_regs[0];
    /* datalo = sign_extend(arg0) */
    switch (sizeop) {
    case 0 | 4:
        /* Recall that SRA sign extends from bit 31 through bit 63.  */
        tcg_out_arithi(s, datalo, n, 24, SHIFT_SLL);
        tcg_out_arithi(s, datalo, datalo, 24, SHIFT_SRA);
        break;
    case 1 | 4:
        tcg_out_arithi(s, datalo, n, 16, SHIFT_SLL);
        tcg_out_arithi(s, datalo, datalo, 16, SHIFT_SRA);
        break;
    case 2 | 4:
        tcg_out_arithi(s, datalo, n, 0, SHIFT_SRA);
        break;
    case 3:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_mov(s, TCG_TYPE_REG, datahi, n);
            tcg_out_mov(s, TCG_TYPE_REG, datalo, n + 1);
            break;
        }
        /* FALLTHRU */
    case 0:
    case 1:
    case 2:
    default:
        /* mov */
        tcg_out_mov(s, TCG_TYPE_REG, datalo, n);
        break;
    }

    *label_ptr[1] |= INSN_OFF19((unsigned long)s->code_ptr -
                                (unsigned long)label_ptr[1]);
#else
    addr_reg = args[addrlo_idx];
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
        addr_reg = TCG_REG_T1;
    }
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        int reg64 = (datalo < 16 ? datalo : TCG_REG_O0);

        tcg_out_ldst_rr(s, reg64, addr_reg,
                        (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                        qemu_ld_opc[sizeop]);

        tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
        if (reg64 != datalo) {
            tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
        }
    } else {
        tcg_out_ldst_rr(s, datalo, addr_reg,
                        (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                        qemu_ld_opc[sizeop]);
    }
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
{
    int addrlo_idx = 1, datalo, datahi, addr_reg;
#if defined(CONFIG_SOFTMMU)
    int memi_idx, memi, n;
    uint32_t *label_ptr;
#endif

    datahi = datalo = args[0];
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        datahi = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
    memi = args[memi_idx];

    addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, sizeop, args,
                                offsetof(CPUTLBEntry, addr_write));

    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        /* Reconstruct the full 64-bit value.  */
        tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
        tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
        tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
        datalo = TCG_REG_O2;
    }

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = (uint32_t *)s->code_ptr;
    tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1)
                  | ((TARGET_LONG_BITS == 64) << 21)
                  | (1 << 29) | (1 << 19)));
    /* delay slot */
    tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_st_opc[sizeop]);

    /* TLB Miss.  */

    n = 0;
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                args[addrlo_idx]);
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datahi);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datalo);

    /* qemu_st_helpers[sizeop](env, addr, val, mmu_idx) */
    tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[sizeop]
                           - (tcg_target_ulong)s->code_ptr) >> 2)
                         & 0x3fffffff));
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n], memi);

    *label_ptr |= INSN_OFF19((unsigned long)s->code_ptr -
                             (unsigned long)label_ptr);
#else
    addr_reg = args[addrlo_idx];
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
        addr_reg = TCG_REG_T1;
    }
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
        tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
        tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
        datalo = TCG_REG_O2;
    }
    tcg_out_ldst_rr(s, datalo, addr_reg,
                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[sizeop]);
#endif /* CONFIG_SOFTMMU */
}
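
/* Added note (not in the original source): the SRL/SLLX/OR sequence used
   by both qemu_ld and qemu_st above rebuilds a 64-bit value from two
   32-bit register halves: SRL by 0 zero-extends the low half, SLLX by 32
   positions the high half in bits 63..32, and OR merges the two.  */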

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
        tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
                  INSN_IMM13(8));
        tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
                      INSN_RS2(TCG_REG_G0));
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            tcg_out_sethi(s, TCG_REG_T1, args[0] & 0xffffe000);
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_T1) |
                      INSN_IMM13((args[0] & 0x1fff)));
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_T1,
                           (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_T1) |
                      INSN_RS2(TCG_REG_G0));
        }
        tcg_out_nop(s);
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
                                   - (tcg_target_ulong)s->code_ptr) >> 2)
                                 & 0x3fffffff));
        } else {
            tcg_out_ld_ptr(s, TCG_REG_T1,
                           (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_T1) |
                      INSN_RS2(TCG_REG_G0));
        }
        /* delay slot */
        tcg_out_nop(s);
        break;
    case INDEX_op_jmp:
    case INDEX_op_br:
        tcg_out_branch_i32(s, COND_A, args[0]);
        tcg_out_nop(s);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

#if TCG_TARGET_REG_BITS == 64
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
#else
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32)
#endif
    OP_32_64(ld8u):
        tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
        break;
    case INDEX_op_ld_i32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_ld32u_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, args[0], args[1], args[2], STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, args[0], args[1], args[2], STH);
        break;
    case INDEX_op_st_i32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_st32_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, args[0], args[1], args[2] & 31, const_args[2], c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
        break;

    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        tcg_out_div32(s, TCG_REG_T1, args[1], args[2], const_args[2],
                      opc == INDEX_op_remu_i32);
        tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
                       ARITH_UMUL);
        tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
                           args[3]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], args[0], args[1],
                            args[2], const_args[2]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2_i32(s, args[4], args[0], args[1],
                            args[2], const_args[2],
                            args[3], const_args[3], args[5]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2_i32(s, args[5], args[0], args[1], args[2],
                             args[3], const_args[3],
                             args[4], const_args[4]);
        break;
    case INDEX_op_add2_i32:
        tcg_out_arithc(s, args[0], args[2], args[4], const_args[4],
                       ARITH_ADDCC);
        tcg_out_arithc(s, args[1], args[3], args[5], const_args[5],
                       ARITH_ADDX);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_arithc(s, args[0], args[2], args[4], const_args[4],
                       ARITH_SUBCC);
        tcg_out_arithc(s, args[1], args[3], args[5], const_args[5],
                       ARITH_SUBX);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_arithc(s, args[0], args[2], args[3], const_args[3],
                       ARITH_UMUL);
        tcg_out_rdy(s, args[1]);
        break;
#endif

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32u:
#endif
        tcg_out_qemu_ld(s, args, 2);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32s:
        tcg_out_qemu_ld(s, args, 2 | 4);
        break;
#endif
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;
    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, args[0], args[1], args[2] & 63, const_args[2], c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        tcg_out_arithc(s, TCG_REG_T1, args[1], args[2], const_args[2],
                       opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX);
        tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
                       ARITH_MULX);
        tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
        break;
    case INDEX_op_ext32s_i64:
        if (const_args[1]) {
            tcg_out_movi(s, TCG_TYPE_I64, args[0], (int32_t)args[1]);
        } else {
            tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
        }
        break;
    case INDEX_op_ext32u_i64:
        if (const_args[1]) {
            tcg_out_movi_imm32(s, args[0], args[1]);
        } else {
            tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
        }
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1],
                           args[3]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], args[0], args[1],
                            args[2], const_args[2]);
        break;

#endif
    gen_arith:
        tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c);
        break;

    gen_arith1:
        tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}

static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "rJ" } },
    { INDEX_op_mul_i32, { "r", "r", "rJ" } },
    { INDEX_op_div_i32, { "r", "r", "rJ" } },
    { INDEX_op_divu_i32, { "r", "r", "rJ" } },
    { INDEX_op_rem_i32, { "r", "r", "rJ" } },
    { INDEX_op_remu_i32, { "r", "r", "rJ" } },
    { INDEX_op_sub_i32, { "r", "r", "rJ" } },
    { INDEX_op_and_i32, { "r", "r", "rJ" } },
    { INDEX_op_andc_i32, { "r", "r", "rJ" } },
    { INDEX_op_or_i32, { "r", "r", "rJ" } },
    { INDEX_op_orc_i32, { "r", "r", "rJ" } },
    { INDEX_op_xor_i32, { "r", "r", "rJ" } },

    { INDEX_op_shl_i32, { "r", "r", "rJ" } },
    { INDEX_op_shr_i32, { "r", "r", "rJ" } },
    { INDEX_op_sar_i32, { "r", "r", "rJ" } },

    { INDEX_op_neg_i32, { "r", "rJ" } },
    { INDEX_op_not_i32, { "r", "rJ" } },

    { INDEX_op_brcond_i32, { "r", "rJ" } },
    { INDEX_op_setcond_i32, { "r", "r", "rJ" } },

#if TCG_TARGET_REG_BITS == 32
    { INDEX_op_brcond2_i32, { "r", "r", "rJ", "rJ" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "rJ", "rJ" } },
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "rJ", "rJ" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "rJ", "rJ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "rJ" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "rJ" } },
    { INDEX_op_mul_i64, { "r", "r", "rJ" } },
    { INDEX_op_div_i64, { "r", "r", "rJ" } },
    { INDEX_op_divu_i64, { "r", "r", "rJ" } },
    { INDEX_op_rem_i64, { "r", "r", "rJ" } },
    { INDEX_op_remu_i64, { "r", "r", "rJ" } },
    { INDEX_op_sub_i64, { "r", "r", "rJ" } },
    { INDEX_op_and_i64, { "r", "r", "rJ" } },
    { INDEX_op_andc_i64, { "r", "r", "rJ" } },
    { INDEX_op_or_i64, { "r", "r", "rJ" } },
    { INDEX_op_orc_i64, { "r", "r", "rJ" } },
    { INDEX_op_xor_i64, { "r", "r", "rJ" } },

    { INDEX_op_shl_i64, { "r", "r", "rJ" } },
    { INDEX_op_shr_i64, { "r", "r", "rJ" } },
    { INDEX_op_sar_i64, { "r", "r", "rJ" } },

    { INDEX_op_neg_i64, { "r", "rJ" } },
    { INDEX_op_not_i64, { "r", "rJ" } },

    { INDEX_op_ext32s_i64, { "r", "ri" } },
    { INDEX_op_ext32u_i64, { "r", "ri" } },

    { INDEX_op_brcond_i64, { "r", "rJ" } },
    { INDEX_op_setcond_i64, { "r", "r", "rJ" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "L", "L", "L", "L" } },

    { INDEX_op_qemu_st8, { "L", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif

    { -1 },
};
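
/* Added note (not in the original source): in the constraint strings
   above, "r" is any register, "J" additionally allows a signed 13-bit
   immediate (TCG_CT_CONST_S13), "I" a signed 11-bit immediate, and "L"
   is "r" minus %o0-%o2, which the qemu_ld/st slow path clobbers when
   calling the MMU helpers; see target_parse_constraint above.  */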

static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
#if TCG_TARGET_REG_BITS == 64
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
#endif
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_G1) |
                     (1 << TCG_REG_G2) |
                     (1 << TCG_REG_G3) |
                     (1 << TCG_REG_G4) |
                     (1 << TCG_REG_G5) |
                     (1 << TCG_REG_G6) |
                     (1 << TCG_REG_G7) |
                     (1 << TCG_REG_O0) |
                     (1 << TCG_REG_O1) |
                     (1 << TCG_REG_O2) |
                     (1 << TCG_REG_O3) |
                     (1 << TCG_REG_O4) |
                     (1 << TCG_REG_O5) |
                     (1 << TCG_REG_O7));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */

    tcg_add_target_add_op_defs(sparc_op_defs);
}

#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE  EM_SPARCV9
#else
# define ELF_HOST_MACHINE  EM_SPARC32PLUS
# define ELF_HOST_FLAGS    EF_SPARC_32PLUS
#endif

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    tcg_target_long func_start __attribute__((packed));
    tcg_target_long func_len __attribute__((packed));
    uint8_t def_cfa[TCG_TARGET_REG_BITS == 64 ? 4 : 2];
    uint8_t win_save;
    uint8_t ret_save[3];
} DebugFrameFDE;

typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDE fde;
} DebugFrame;

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = -sizeof(void *) & 0x7f,
    .cie.return_column = 15,            /* o7 */

    .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
    .fde.def_cfa = {
#if TCG_TARGET_REG_BITS == 64
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
#else
        13, 30                          /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde.win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde.ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    debug_frame.fde.func_start = (tcg_target_long) buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}