/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#ifdef CONFIG_USE_GUEST_BASE
# define TCG_GUEST_BASE_REG TCG_REG_I3
#else
# define TCG_GUEST_BASE_REG TCG_REG_G0
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,
    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};

static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << ((sizeof(tcg_target_long) * 8 - bits))
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}

static inline int check_fit_i32(uint32_t val, unsigned int bits)
{
    return ((val << (32 - bits)) >> (32 - bits)) == val;
}
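
/* Worked example (illustrative note, not part of the original source):
   with a 64-bit tcg_target_long and bits = 13, the shift pair above
   sign-extends the low 13 bits of VAL and compares the result with the
   original value, so

       check_fit_tl(4095, 13)  -> 1    max 13-bit signed value
       check_fit_tl(4096, 13)  -> 0    needs 14 bits
       check_fit_tl(-4096, 13) -> 1    min 13-bit signed value

   i.e. the test accepts exactly the constants usable in a simm13 field.  */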

static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch (type) {
    case R_SPARC_32:
        if (value != (uint32_t)value)
            tcg_abort();
        *(uint32_t *)code_ptr = value;
        break;
    case R_SPARC_WDISP22:
        value -= (long)code_ptr;
        value >>= 2;
        if (!check_fit_tl(value, 22))
            tcg_abort();
        *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x3fffff) | value;
        break;
    case R_SPARC_WDISP19:
        value -= (long)code_ptr;
        value >>= 2;
        if (!check_fit_tl(value, 19))
            tcg_abort();
        *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x7ffff) | value;
        break;
    default:
        tcg_abort();
    }
}
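
/* Both WDISP cases above patch PC-relative *word* displacements: the byte
   distance from the branch to its target is divided by 4 and must fit the
   22-bit (Bicc) or 19-bit (BPcc) signed field, giving a branch reach of
   about +/-8 MB and +/-1 MB respectively.  */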

/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 6;
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        // Helper args
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S13;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;

    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11))
        return 1;
    else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13))
        return 1;
    else
        return 0;
}

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_OFF22(x) (((x) >> 2) & 0x3fffff)
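
/* Illustrative example (not part of the original source) of how an
   instruction word is built from these fields: "add %o1, 42, %o0" is

       ARITH_ADD                    op = 2, op3 = 0x00 (defined below)
     | INSN_RD(TCG_REG_O0)          rd  = 8  (%o0)
     | INSN_RS1(TCG_REG_O1)         rs1 = 9  (%o1)
     | INSN_IMM13(42)               i = 1, simm13 = 42

   which is exactly the word that tcg_out_arithi() below emits.  */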

#define INSN_COND(x, a) (((x) << 25) | ((a) << 29))
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2))

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
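
/* The *_LE forms use the alternate-space load/store opcodes with
   ASI_PRIMARY_LITTLE, which performs a byte-swapped access to the primary
   address space.  This lets the big-endian SPARC host implement a
   little-endian guest memory access in a single instruction instead of a
   normal access followed by explicit byte swapping.  */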

static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
                                 int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_RS2(rs2));
}

static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
                                  uint32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, int rd, int rs1,
                           int val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
}

static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static inline void tcg_out_movi_imm32(TCGContext *s, int ret, uint32_t arg)
{
    if (check_fit_tl(arg, 13))
        tcg_out_movi_imm13(s, ret, arg);
    else {
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff)
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
    }
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    /* All 32-bit constants, as well as 64-bit constants with
       no high bits set go through movi_imm32.  */
    if (TCG_TARGET_REG_BITS == 32
        || type == TCG_TYPE_I32
        || (arg & ~(tcg_target_long)0xffffffff) == 0) {
        tcg_out_movi_imm32(s, ret, arg);
    } else if (check_fit_tl(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits.  */
        tcg_out_movi_imm13(s, ret, arg);
    } else if (check_fit_tl(arg, 32)) {
        /* A 32-bit constant sign-extended to 64-bits.  */
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
    } else {
        tcg_out_movi_imm32(s, TCG_REG_I4, arg >> (TCG_TARGET_REG_BITS / 2));
        tcg_out_arithi(s, TCG_REG_I4, TCG_REG_I4, 32, SHIFT_SLLX);
        tcg_out_movi_imm32(s, ret, arg);
        tcg_out_arith(s, ret, ret, TCG_REG_I4, ARITH_OR);
    }
}
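
/* Worked example (illustrative): loading 0x12345678 does not fit in a
   simm13, so tcg_out_movi_imm32() emits the classic two-insn sequence

       sethi  %hi(0x12345678), %reg     ; %reg = 0x12345400 (upper 22 bits)
       or     %reg, 0x278, %reg         ; %reg = 0x12345678 (low 10 bits)

   A full 64-bit constant in the final branch of tcg_out_movi() costs up
   to six instructions: two for each 32-bit half, plus a shift and an OR
   to combine them via the reserved scratch register %i4.  */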

static inline void tcg_out_ldst_rr(TCGContext *s, int data, int a1,
                                   int a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
                                int offset, int op)
{
    if (check_fit_tl(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_I5, op);
    }
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
                                  tcg_target_long arg)
{
    if (!check_fit_tl(arg, 10)) {
        tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
    }
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
}

static inline void tcg_out_sety(TCGContext *s, int rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static inline void tcg_out_rdy(TCGContext *s, int rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}

static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        if (check_fit_tl(val, 13))
            tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
        else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, val);
            tcg_out_arith(s, reg, reg, TCG_REG_I5, ARITH_ADD);
        }
    }
}

static inline void tcg_out_andi(TCGContext *s, int rd, int rs,
                                tcg_target_long val)
{
    if (val != 0) {
        if (check_fit_tl(val, 13))
            tcg_out_arithi(s, rd, rs, val, ARITH_AND);
        else {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_I5, val);
            tcg_out_arith(s, rd, rs, TCG_REG_I5, ARITH_AND);
        }
    }
}

static void tcg_out_div32(TCGContext *s, int rd, int rs1,
                          int val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_I5, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_I5);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
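
/* The 32-bit UDIV/SDIV instructions divide the 64-bit value Y:rs1 by the
   second operand, so Y must hold the high 32 bits of the dividend: zero
   for an unsigned divide, or the replicated sign bit (rs1 >> 31) for a
   signed one, which is what tcg_out_div32() sets up above.  */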

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_sethi(s, TCG_REG_G0, 0);
}

/* Emit a branch to LABEL_INDEX.  OPC must contain the condition and
   annul bits, as built by INSN_COND.  */
static void tcg_out_branch_i32(TCGContext *s, int opc, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        tcg_out32(s, (INSN_OP(0) | opc | INSN_OP2(0x2)
                      | INSN_OFF22(l->u.value - (unsigned long)s->code_ptr)));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP22, label_index, 0);
        tcg_out32(s, (INSN_OP(0) | opc | INSN_OP2(0x2) | 0));
    }
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_branch_i64(TCGContext *s, int opc, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        tcg_out32(s, (INSN_OP(0) | opc | INSN_OP2(0x1) |
                      (0x5 << 19) |
                      INSN_OFF19(l->u.value - (unsigned long)s->code_ptr)));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label_index, 0);
        tcg_out32(s, (INSN_OP(0) | opc | INSN_OP2(0x1) |
                      (0x5 << 19) | 0));
    }
}
#endif

static const uint8_t tcg_cond_to_bcond[10] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static void tcg_out_cmp(TCGContext *s, TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond,
                               TCGArg arg1, TCGArg arg2, int const_arg2,
                               int label_index)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_branch_i32(s, INSN_COND(tcg_cond_to_bcond[cond], 0), label_index);
    tcg_out_nop(s);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond,
                               TCGArg arg1, TCGArg arg2, int const_arg2,
                               int label_index)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_branch_i64(s, INSN_COND(tcg_cond_to_bcond[cond], 0), label_index);
    tcg_out_nop(s);
}
#else
static void tcg_out_brcond2_i32(TCGContext *s, TCGCond cond,
                                TCGArg al, TCGArg ah,
                                TCGArg bl, int blconst,
                                TCGArg bh, int bhconst, int label_dest)
{
    int cc, label_next = gen_new_label();

    tcg_out_cmp(s, ah, bh, bhconst);

    /* Note that we fill one of the delay slots with the second compare.  */
    switch (cond) {
    case TCG_COND_EQ:
        cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
        tcg_out_branch_i32(s, cc, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_EQ], 0);
        tcg_out_branch_i32(s, cc, label_dest);
        break;

    case TCG_COND_NE:
        cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
        tcg_out_branch_i32(s, cc, label_dest);
        tcg_out_cmp(s, al, bl, blconst);
        tcg_out_branch_i32(s, cc, label_dest);
        break;

    default:
        /* ??? One could fairly easily special-case 64-bit unsigned
           compares against 32-bit zero-extended constants.  For instance,
           we know that (unsigned)AH < 0 is false and need not emit it.
           Similarly, (unsigned)AH > 0 being true implies AH != 0, so the
           second branch will never be taken.  */
        cc = INSN_COND(tcg_cond_to_bcond[cond], 0);
        tcg_out_branch_i32(s, cc, label_dest);
        tcg_out_nop(s);
        cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
        tcg_out_branch_i32(s, cc, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        cc = INSN_COND(tcg_cond_to_bcond[tcg_unsigned_cond(cond)], 0);
        tcg_out_branch_i32(s, cc, label_dest);
        break;
    }
    tcg_out_nop(s);

    tcg_out_label(s, label_next, s->code_ptr);
}
#endif

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    TCGArg t;

    /* For 32-bit comparisons, we can play games with ADDX/SUBX.  */
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (c2 != 0) {
            tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
        }
        c1 = TCG_REG_G0, c2 = ret, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        if (c2const && c2 != 0) {
            tcg_out_movi_imm13(s, TCG_REG_I5, c2);
            c2 = TCG_REG_I5;
        }
        t = c1, c1 = c2, c2 = t, c2const = 0;
        cond = tcg_swap_cond(cond);
        break;

    case TCG_COND_LTU:
    case TCG_COND_GEU:
        break;

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out32(s, ARITH_MOVCC | INSN_RD(ret)
                  | INSN_RS1(tcg_cond_to_bcond[cond])
                  | MOVCC_ICC | INSN_IMM11(1));
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
    }
}
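
/* The ADDX/SUBX trick above: after SUBCC, the carry flag is set exactly
   when the first operand is unsigned-less-than the second.  Thus
   "addx %g0, 0, ret" materializes LTU directly (ret = C), while
   "subx %g0, -1, ret" computes ret = 1 - C, i.e. GEU.  EQ and NE reduce
   to GEU/LTU against zero via the XOR, and GTU/LEU become LTU/GEU by
   swapping operands.  */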

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movi_imm13(s, ret, 0);
    tcg_out32(s, ARITH_MOVCC | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | MOVCC_XCC | INSN_IMM11(1));
}
#else
static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                 TCGArg al, TCGArg ah,
                                 TCGArg bl, int blconst,
                                 TCGArg bh, int bhconst)
{
    int lab;

    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_setcond_i32(s, TCG_COND_EQ, TCG_REG_I5, al, bl, blconst);
        tcg_out_setcond_i32(s, TCG_COND_EQ, ret, ah, bh, bhconst);
        tcg_out_arith(s, ret, ret, TCG_REG_I5, ARITH_AND);
        break;

    case TCG_COND_NE:
659
        tcg_out_setcond_i32(s, TCG_COND_NE, ret, ah, bh, bhconst);
660
        tcg_out_arith(s, ret, ret, TCG_REG_I5, ARITH_OR);
661
        break;
662

    
663
    default:
664
        lab = gen_new_label();
665

    
666
        tcg_out_cmp(s, ah, bh, bhconst);
667
        tcg_out_branch_i32(s, INSN_COND(tcg_cond_to_bcond[cond], 1), lab);
668
        tcg_out_movi_imm13(s, ret, 1);
669
        tcg_out_branch_i32(s, INSN_COND(COND_NE, 1), lab);
670
        tcg_out_movi_imm13(s, ret, 0);
671

    
672
        tcg_out_setcond_i32(s, tcg_unsigned_cond(cond), ret, al, bl, blconst);
673

    
674
        tcg_out_label(s, lab, s->code_ptr);
675
        break;
676
    }
677
}
678
#endif
679

    
680
/* Generate global QEMU prologue and epilogue code */
681
static void tcg_target_qemu_prologue(TCGContext *s)
682
{
683
    int tmp_buf_size, frame_size;
684

    
685
    /* The TCG temp buffer is at the top of the frame, immediately
686
       below the frame pointer.  */
687
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
688
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
689
                  tmp_buf_size);
690

    
691
    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
692
       otherwise the minimal frame usable by callees.  */
693
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
694
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
695
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
696
    frame_size &= -TCG_TARGET_STACK_ALIGN;
697
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
698
              INSN_IMM13(-frame_size));
699

    
700
#ifdef CONFIG_USE_GUEST_BASE
701
    if (GUEST_BASE != 0) {
702
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
703
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
704
    }
705
#endif
706

    
707
    tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I1) |
708
              INSN_RS2(TCG_REG_G0));
709
    /* delay slot */
710
    tcg_out_nop(s);
711

    
712
    /* No epilogue required.  We issue ret + restore directly in the TB.  */
713
}
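
/* Note on the entry sequence (explanatory; inferred from the register
   usage above): the prologue's caller passes two arguments, the CPU env
   pointer and the address of the translated code to execute.  They arrive
   in %o0/%o1 and, after the SAVE, are visible here as %i0/%i1, which is
   why the indirect jump above goes through TCG_REG_I1.  */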

#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO_IDX contains the index into ARGS of the low part of the
   address; the high part of the address is at ADDRLO_IDX+1.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static int tcg_out_tlb_load(TCGContext *s, int addrlo_idx, int mem_index,
                            int s_bits, const TCGArg *args, int which)
{
    const int addrlo = args[addrlo_idx];
    const int r0 = TCG_REG_O0;
    const int r1 = TCG_REG_O1;
    const int r2 = TCG_REG_O2;
    int addr = addrlo;
    int tlb_ofs;

    if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
        /* Assemble the 64-bit address in R0.  */
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        tcg_out_arithi(s, r1, args[addrlo_idx + 1], 32, SHIFT_SLLX);
        tcg_out_arith(s, r0, r0, r1, ARITH_OR);
    }

    /* Shift the page number down to tlb-entry.  */
    tcg_out_arithi(s, r1, addrlo,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment.  */
    tcg_out_andi(s, r0, addr, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* Compute tlb index, modulo tlb size.  */
    tcg_out_andi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* Relative to the current ENV.  */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend.  */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_tl(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        tcg_out_addi(s, r1, tlb_ofs);
        tlb_ofs = 0;
    }

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs + offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        return r0;
    }
    return addrlo;
}
#endif /* CONFIG_SOFTMMU */
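
/* For reference, on a 64-bit-host/32-bit-guest build the sequence emitted
   by tcg_out_tlb_load() is roughly (illustrative pseudo-assembly):

       srl   addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, %o1
       and   addr, TARGET_PAGE_MASK | ((1 << s_bits) - 1), %o0
       and   %o1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, %o1
       add   AREG0, %o1, %o1               ! env + entry offset
       ld    [%o1 + tlb_ofs + which], %o2  ! TLB comparator (target_ulong)
       ldx   [%o1 + tlb_ofs + addend], %o1 ! host-address addend
       subcc %o0, %o2, %g0                 ! compare; result left in %icc
       srl   addr, 0, %o0                  ! zero-extend the guest address

   after which the caller branches on the comparison and, on a hit, uses
   %o1 as the register-plus-register addend for the actual access.  */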

static const int qemu_ld_opc[8] = {
#ifdef TARGET_WORDS_BIGENDIAN
    LDUB, LDUH, LDUW, LDX, LDSB, LDSH, LDSW, LDX
#else
    LDUB, LDUH_LE, LDUW_LE, LDX_LE, LDSB, LDSH_LE, LDSW_LE, LDX_LE
#endif
};

static const int qemu_st_opc[4] = {
#ifdef TARGET_WORDS_BIGENDIAN
    STB, STH, STW, STX
#else
    STB, STH_LE, STW_LE, STX_LE
#endif
};

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int sizeop)
{
    int addrlo_idx = 1, datalo, datahi, addr_reg;
#if defined(CONFIG_SOFTMMU)
    int memi_idx, memi, s_bits, n;
    uint32_t *label_ptr[2];
#endif

    datahi = datalo = args[0];
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        datahi = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
    memi = args[memi_idx];
    s_bits = sizeop & 3;

    addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, s_bits, args,
                                offsetof(CPUTLBEntry, addr_read));

    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        int reg64;

        /* bne,pn %[xi]cc, label0 */
        label_ptr[0] = (uint32_t *)s->code_ptr;
        tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_NE, 0) | INSN_OP2(0x1)
                      | ((TARGET_LONG_BITS == 64) << 21)));

        /* TLB Hit.  */
        /* Load all 64 bits into an O/G register.  */
        reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
        tcg_out_ldst_rr(s, reg64, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);

        /* Move the two 32-bit pieces into the destination registers.  */
        tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
        if (reg64 != datalo) {
            tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
        }

        /* b,a,pt label1 */
        label_ptr[1] = (uint32_t *)s->code_ptr;
        tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x1)
                      | (1 << 29) | (1 << 19)));
    } else {
        /* The fast path is exactly one insn.  Thus we can perform the
           entire TLB Hit in the (annulled) delay slot of the branch
           over the TLB Miss case.  */

        /* beq,a,pt %[xi]cc, label0 */
        label_ptr[0] = NULL;
        label_ptr[1] = (uint32_t *)s->code_ptr;
        tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1)
                      | ((TARGET_LONG_BITS == 64) << 21)
                      | (1 << 29) | (1 << 19)));
        /* delay slot */
        tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);
    }

    /* TLB Miss.  */

    if (label_ptr[0]) {
        *label_ptr[0] |= INSN_OFF19((unsigned long)s->code_ptr -
                                    (unsigned long)label_ptr[0]);
    }
    n = 0;
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                args[addrlo_idx]);

    /* qemu_ld_helpers[s_bits](env, addr, mmu_idx) */
    tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
                           - (tcg_target_ulong)s->code_ptr) >> 2)
                         & 0x3fffffff));
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[n], memi);

    n = tcg_target_call_oarg_regs[0];
    /* datalo = sign_extend(arg0) */
    switch (sizeop) {
    case 0 | 4:
        /* Recall that SRA sign extends from bit 31 through bit 63.  */
        tcg_out_arithi(s, datalo, n, 24, SHIFT_SLL);
        tcg_out_arithi(s, datalo, datalo, 24, SHIFT_SRA);
        break;
    case 1 | 4:
        tcg_out_arithi(s, datalo, n, 16, SHIFT_SLL);
        tcg_out_arithi(s, datalo, datalo, 16, SHIFT_SRA);
        break;
    case 2 | 4:
        tcg_out_arithi(s, datalo, n, 0, SHIFT_SRA);
        break;
    case 3:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_mov(s, TCG_TYPE_REG, datahi, n);
            tcg_out_mov(s, TCG_TYPE_REG, datalo, n + 1);
            break;
        }
        /* FALLTHRU */
    case 0:
    case 1:
    case 2:
    default:
        /* mov */
        tcg_out_mov(s, TCG_TYPE_REG, datalo, n);
        break;
    }

    *label_ptr[1] |= INSN_OFF19((unsigned long)s->code_ptr -
                                (unsigned long)label_ptr[1]);
#else
    addr_reg = args[addrlo_idx];
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_I5, addr_reg, 0, SHIFT_SRL);
        addr_reg = TCG_REG_I5;
    }
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        int reg64 = (datalo < 16 ? datalo : TCG_REG_O0);

        tcg_out_ldst_rr(s, reg64, addr_reg,
                        (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                        qemu_ld_opc[sizeop]);

        tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
        if (reg64 != datalo) {
            tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
        }
    } else {
        tcg_out_ldst_rr(s, datalo, addr_reg,
                        (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                        qemu_ld_opc[sizeop]);
    }
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
{
    int addrlo_idx = 1, datalo, datahi, addr_reg;
#if defined(CONFIG_SOFTMMU)
    int memi_idx, memi, n;
    uint32_t *label_ptr;
#endif

    datahi = datalo = args[0];
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        datahi = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
    memi = args[memi_idx];

    addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, sizeop, args,
                                offsetof(CPUTLBEntry, addr_write));

    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        /* Reconstruct the full 64-bit value in %g1, using %o2 as temp.  */
        /* ??? Redefine the temps from %i4/%i5 so that we have a o/g temp. */
        tcg_out_arithi(s, TCG_REG_G1, datalo, 0, SHIFT_SRL);
        tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
        tcg_out_arith(s, TCG_REG_G1, TCG_REG_G1, TCG_REG_O2, ARITH_OR);
        datalo = TCG_REG_G1;
    }

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = (uint32_t *)s->code_ptr;
    tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1)
                  | ((TARGET_LONG_BITS == 64) << 21)
                  | (1 << 29) | (1 << 19)));
    /* delay slot */
    tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_st_opc[sizeop]);

    /* TLB Miss.  */

    n = 0;
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                args[addrlo_idx]);
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datahi);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datalo);

    /* qemu_st_helpers[sizeop](env, addr, val, mmu_idx) */
    tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[sizeop]
                           - (tcg_target_ulong)s->code_ptr) >> 2)
                         & 0x3fffffff));
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n], memi);

    *label_ptr |= INSN_OFF19((unsigned long)s->code_ptr -
                             (unsigned long)label_ptr);
#else
    addr_reg = args[addrlo_idx];
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_I5, addr_reg, 0, SHIFT_SRL);
        addr_reg = TCG_REG_I5;
    }
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        /* Reconstruct the full 64-bit value in %g1, using %o2 as temp.  */
        /* ??? Redefine the temps from %i4/%i5 so that we have a o/g temp. */
        tcg_out_arithi(s, TCG_REG_G1, datalo, 0, SHIFT_SRL);
        tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
        tcg_out_arith(s, TCG_REG_G1, TCG_REG_G1, TCG_REG_O2, ARITH_OR);
        datalo = TCG_REG_G1;
    }
    tcg_out_ldst_rr(s, datalo, addr_reg,
                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[sizeop]);
#endif /* CONFIG_SOFTMMU */
}
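
/* Both qemu_ld and qemu_st emit the forward branch over the slow path
   with a zero displacement and fix it up afterwards: label_ptr records
   the branch's address, and OR-ing in INSN_OFF19(current - branch) fills
   the 19-bit word displacement once the slow-path length is known.  */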

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
        tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
                  INSN_IMM13(8));
        tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
                      INSN_RS2(TCG_REG_G0));
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            tcg_out_sethi(s, TCG_REG_I5, args[0] & 0xffffe000);
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
                      INSN_IMM13((args[0] & 0x1fff)));
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_I5, (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
                      INSN_RS2(TCG_REG_G0));
        }
        tcg_out_nop(s);
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0])
            tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
                                   - (tcg_target_ulong)s->code_ptr) >> 2)
                                 & 0x3fffffff));
        else {
            tcg_out_ld_ptr(s, TCG_REG_I5,
                           (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_I5) |
                      INSN_RS2(TCG_REG_G0));
        }
        /* delay slot */
        tcg_out_nop(s);
        break;
    case INDEX_op_jmp:
    case INDEX_op_br:
        tcg_out_branch_i32(s, INSN_COND(COND_A, 0), args[0]);
        tcg_out_nop(s);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

#if TCG_TARGET_REG_BITS == 64
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
#else
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32)
#endif
    OP_32_64(ld8u):
        tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
        break;
    case INDEX_op_ld_i32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_ld32u_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, args[0], args[1], args[2], STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, args[0], args[1], args[2], STH);
        break;
    case INDEX_op_st_i32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_st32_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
        goto gen_arith;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto gen_arith;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto gen_arith;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
        break;

    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        tcg_out_div32(s, TCG_REG_I5, args[1], args[2], const_args[2],
                      opc == INDEX_op_remu_i32);
        tcg_out_arithc(s, TCG_REG_I5, TCG_REG_I5, args[2], const_args[2],
                       ARITH_UMUL);
        tcg_out_arith(s, args[0], args[1], TCG_REG_I5, ARITH_SUB);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
                           args[3]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], args[0], args[1],
                            args[2], const_args[2]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2_i32(s, args[4], args[0], args[1],
                            args[2], const_args[2],
                            args[3], const_args[3], args[5]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2_i32(s, args[5], args[0], args[1], args[2],
                             args[3], const_args[3],
                             args[4], const_args[4]);
        break;
    case INDEX_op_add2_i32:
        tcg_out_arithc(s, args[0], args[2], args[4], const_args[4],
                       ARITH_ADDCC);
        tcg_out_arithc(s, args[1], args[3], args[5], const_args[5],
                       ARITH_ADDX);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_arithc(s, args[0], args[2], args[4], const_args[4],
                       ARITH_SUBCC);
        tcg_out_arithc(s, args[1], args[3], args[5], const_args[5],
                       ARITH_SUBX);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_arithc(s, args[0], args[2], args[3], const_args[3],
                       ARITH_UMUL);
        tcg_out_rdy(s, args[1]);
        break;
#endif

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32u:
#endif
        tcg_out_qemu_ld(s, args, 2);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32s:
        tcg_out_qemu_ld(s, args, 2 | 4);
        break;
#endif
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;
    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
        goto gen_arith;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto gen_arith;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto gen_arith;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        tcg_out_arithc(s, TCG_REG_I5, args[1], args[2], const_args[2],
                       opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX);
        tcg_out_arithc(s, TCG_REG_I5, TCG_REG_I5, args[2], const_args[2],
                       ARITH_MULX);
        tcg_out_arith(s, args[0], args[1], TCG_REG_I5, ARITH_SUB);
        break;
    case INDEX_op_ext32s_i64:
        if (const_args[1]) {
            tcg_out_movi(s, TCG_TYPE_I64, args[0], (int32_t)args[1]);
        } else {
            tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
        }
        break;
    case INDEX_op_ext32u_i64:
        if (const_args[1]) {
            tcg_out_movi_imm32(s, args[0], args[1]);
        } else {
            tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
        }
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1],
                           args[3]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], args[0], args[1],
                            args[2], const_args[2]);
        break;

#endif
    gen_arith:
        tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c);
        break;

    gen_arith1:
        tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}

static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "rJ" } },
    { INDEX_op_mul_i32, { "r", "r", "rJ" } },
    { INDEX_op_div_i32, { "r", "r", "rJ" } },
    { INDEX_op_divu_i32, { "r", "r", "rJ" } },
    { INDEX_op_rem_i32, { "r", "r", "rJ" } },
    { INDEX_op_remu_i32, { "r", "r", "rJ" } },
    { INDEX_op_sub_i32, { "r", "r", "rJ" } },
    { INDEX_op_and_i32, { "r", "r", "rJ" } },
    { INDEX_op_andc_i32, { "r", "r", "rJ" } },
    { INDEX_op_or_i32, { "r", "r", "rJ" } },
    { INDEX_op_orc_i32, { "r", "r", "rJ" } },
    { INDEX_op_xor_i32, { "r", "r", "rJ" } },

    { INDEX_op_shl_i32, { "r", "r", "rJ" } },
    { INDEX_op_shr_i32, { "r", "r", "rJ" } },
    { INDEX_op_sar_i32, { "r", "r", "rJ" } },

    { INDEX_op_neg_i32, { "r", "rJ" } },
    { INDEX_op_not_i32, { "r", "rJ" } },

    { INDEX_op_brcond_i32, { "r", "rJ" } },
    { INDEX_op_setcond_i32, { "r", "r", "rJ" } },

#if TCG_TARGET_REG_BITS == 32
    { INDEX_op_brcond2_i32, { "r", "r", "rJ", "rJ" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "rJ", "rJ" } },
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "rJ", "rJ" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "rJ", "rJ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "rJ" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "rJ" } },
    { INDEX_op_mul_i64, { "r", "r", "rJ" } },
    { INDEX_op_div_i64, { "r", "r", "rJ" } },
    { INDEX_op_divu_i64, { "r", "r", "rJ" } },
    { INDEX_op_rem_i64, { "r", "r", "rJ" } },
    { INDEX_op_remu_i64, { "r", "r", "rJ" } },
    { INDEX_op_sub_i64, { "r", "r", "rJ" } },
    { INDEX_op_and_i64, { "r", "r", "rJ" } },
    { INDEX_op_andc_i64, { "r", "r", "rJ" } },
    { INDEX_op_or_i64, { "r", "r", "rJ" } },
    { INDEX_op_orc_i64, { "r", "r", "rJ" } },
    { INDEX_op_xor_i64, { "r", "r", "rJ" } },

    { INDEX_op_shl_i64, { "r", "r", "rJ" } },
    { INDEX_op_shr_i64, { "r", "r", "rJ" } },
    { INDEX_op_sar_i64, { "r", "r", "rJ" } },

    { INDEX_op_neg_i64, { "r", "rJ" } },
    { INDEX_op_not_i64, { "r", "rJ" } },

    { INDEX_op_ext32s_i64, { "r", "ri" } },
    { INDEX_op_ext32u_i64, { "r", "ri" } },

    { INDEX_op_brcond_i64, { "r", "rJ" } },
    { INDEX_op_setcond_i64, { "r", "r", "rJ" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "L", "L", "L", "L" } },

    { INDEX_op_qemu_st8, { "L", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif

    { -1 },
};
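
/* Constraint letters used above (see target_parse_constraint): "r" is any
   register; "L" is any register except the %o0-%o2 helper arguments, so
   qemu_ld/st operands survive the slow-path call setup; "i" is any
   immediate; and "J" restricts a constant to a signed 13-bit value that
   fits the simm13 field.  ("I", signed 11-bit, is defined but unused in
   this table.)  */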

static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
#if TCG_TARGET_REG_BITS == 64
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
#endif
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_G1) |
                     (1 << TCG_REG_G2) |
                     (1 << TCG_REG_G3) |
                     (1 << TCG_REG_G4) |
                     (1 << TCG_REG_G5) |
                     (1 << TCG_REG_G6) |
                     (1 << TCG_REG_G7) |
                     (1 << TCG_REG_O0) |
                     (1 << TCG_REG_O1) |
                     (1 << TCG_REG_O2) |
                     (1 << TCG_REG_O3) |
                     (1 << TCG_REG_O4) |
                     (1 << TCG_REG_O5) |
                     (1 << TCG_REG_O7));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0);
#if TCG_TARGET_REG_BITS == 64
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I4); // for internal use
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I5); // for internal use
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O7);
    tcg_add_target_add_op_defs(sparc_op_defs);
}

#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE  EM_SPARCV9
#else
# define ELF_HOST_MACHINE  EM_SPARC32PLUS
# define ELF_HOST_FLAGS    EF_SPARC_32PLUS
#endif

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    tcg_target_long func_start __attribute__((packed));
    tcg_target_long func_len __attribute__((packed));
    uint8_t def_cfa[TCG_TARGET_REG_BITS == 64 ? 4 : 2];
    uint8_t win_save;
    uint8_t ret_save[3];
} DebugFrameFDE;

typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDE fde;
} DebugFrame;

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = -sizeof(void *) & 0x7f,
    .cie.return_column = 15,            /* o7 */

    .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
    .fde.def_cfa = {
#if TCG_TARGET_REG_BITS == 64
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
#else
        13, 30                          /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde.win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde.ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    debug_frame.fde.func_start = (tcg_target_long) buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}