Statistics
| Branch: | Revision:

root / tcg / sparc / tcg-target.c @ cf7c2ca5

History | View | Annotate | Download (22.2 kB)

1
/*
2
 * Tiny Code Generator for QEMU
3
 *
4
 * Copyright (c) 2008 Fabrice Bellard
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7
 * of this software and associated documentation files (the "Software"), to deal
8
 * in the Software without restriction, including without limitation the rights
9
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
 * copies of the Software, and to permit persons to whom the Software is
11
 * furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22
 * THE SOFTWARE.
23
 */
24

    
25
/* Printable register names, indexed by TCG register number, following
   the SPARC register-window layout: %g0-%g7 globals, %o0-%o7 outs,
   %l0-%l7 locals, %i0-%i7 ins.  */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
59

    
60
/* Register allocation preference order: locals first, then a subset of
   the in registers.  Globals, outs and the remaining ins are excluded
   (several of them are reserved in tcg_target_init or clobbered by
   calls).  */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,
    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
};
75

    
76
/* Registers used to pass the first six integer function arguments
   (%o0-%o5), matching tcg_target_get_call_iarg_regs_count().  */
static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};
84

    
85
/* Registers holding function return values (%o0, and %o1 for the high
   part of a two-register result).  */
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_O0,
    TCG_REG_O1,
};
89

    
90
/* Apply relocation TYPE at CODE_PTR: store VALUE+ADDEND there.  Only
   absolute 32-bit relocations (R_SPARC_32) are supported; anything
   else, or a value that does not fit in 32 bits, aborts.  */
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    if (type != R_SPARC_32) {
        tcg_abort();
    }
    if (value != (uint32_t)value) {
        /* does not fit in the 32-bit field */
        tcg_abort();
    }
    *(uint32_t *)code_ptr = value;
}
104

    
105
/* Maximum number of registers used for input function arguments.  The
   SPARC calling convention always provides six (%o0-%o5), regardless
   of FLAGS.  */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    (void)flags;
    return 6;
}
110

    
111
/* parse target specific constraints */
112
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
113
{
114
    const char *ct_str;
115

    
116
    ct_str = *pct_str;
117
    switch (ct_str[0]) {
118
    case 'r':
119
    case 'L': /* qemu_ld/st constraint */
120
        ct->ct |= TCG_CT_REG;
121
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
122
        break;
123
    case 'I':
124
        ct->ct |= TCG_CT_CONST_S11;
125
        break;
126
    case 'J':
127
        ct->ct |= TCG_CT_CONST_S13;
128
        break;
129
    default:
130
        return -1;
131
    }
132
    ct_str++;
133
    *pct_str = ct_str;
134
    return 0;
135
}
136

    
137
/* Return non-zero if VAL fits in a signed immediate field of BITS
   bits, i.e. sign-extending the low BITS bits of VAL reproduces VAL.
   The previous version left-shifted VAL as a signed integer, which is
   undefined behaviour in C when VAL is negative; perform the shift on
   the unsigned representation instead.  (The arithmetic right shift
   of a negative value is implementation-defined but universally sign-
   extending on supported hosts, as the original code also assumed.)  */
static inline int check_fit(tcg_target_long val, unsigned int bits)
{
    unsigned int shift = sizeof(tcg_target_long) * 8 - bits;

    return ((tcg_target_long)((tcg_target_ulong)val << shift) >> shift) == val;
}
142

    
143
/* test if a constant matches the constraint */
144
static inline int tcg_target_const_match(tcg_target_long val,
145
                                         const TCGArgConstraint *arg_ct)
146
{
147
    int ct;
148

    
149
    ct = arg_ct->ct;
150
    if (ct & TCG_CT_CONST)
151
        return 1;
152
    else if ((ct & TCG_CT_CONST_S11) && check_fit(val, 11))
153
        return 1;
154
    else if ((ct & TCG_CT_CONST_S13) && check_fit(val, 13))
155
        return 1;
156
    else
157
        return 0;
158
}
159

    
160
/* SPARC 32-bit instruction word field encodings.  */
#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)

/* Immediate form: i bit (bit 13) set, signed 13-bit immediate in the
   low bits.  */
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
/* 22-bit branch displacement, counted in instruction words.  */
#define INSN_OFF22(x) (((x) >> 2) & 0x3fffff)

/* Bicc condition field: cond in bits 28:25, annul bit in bit 29.  */
#define INSN_COND(x, a) (((x) << 25) | ((a) << 29))
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
/* "ba" -- branch always.  */
#define BA         (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2))

/* Format-3 arithmetic/logic opcodes (op3 field values).  */
#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x18))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))

/* 32-bit shifts.  */
#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

/* 64-bit (v9 "x") shifts: same op3, with the x bit (bit 12) set.  */
#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

/* Control transfer, window management, and memory access opcodes.  */
#define WRY        (INSN_OP(2) | INSN_OP3(0x30))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))

    
232
/* Register-to-register move, emitted as "or arg, %g0, ret".  */
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS1(arg) |
              INSN_RS2(TCG_REG_G0));
}
237

    
238
/* Load constant ARG into register RET.  Constants fitting a signed
   13-bit immediate use a single "or %g0, simm13, ret"; otherwise the
   classic sethi/or pair is used (32-bit constants only).  */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                int ret, tcg_target_long arg)
{
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    /* 64-bit host: constants outside the signed 32-bit range are only
       diagnosed, not emitted.  */
    if (!check_fit(arg, 32))
        fprintf(stderr, "unimplemented %s with constant %ld\n", __func__, arg);
#endif
    if (check_fit(arg, 13))
        /* or %g0, arg, ret */
        tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS1(TCG_REG_G0) |
                  INSN_IMM13(arg));
    else {
        /* sethi %hi(arg), ret ; or ret, %lo(arg), ret (or skipped when
           the low 10 bits are zero).  */
        tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
        if (arg & 0x3ff)
            tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS1(ret) |
                      INSN_IMM13(arg & 0x3ff));
    }
}
255

    
256
/* Load the 32-bit word at absolute address ARG into RET, using RET
   itself as the address scratch: sethi %hi(arg), ret; lduw
   [ret + %lo(arg)], ret.  Only the low 32 bits of ARG are used.  */
static inline void tcg_out_ld_raw(TCGContext *s, int ret,
                                  tcg_target_long arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | (((uint32_t)arg & 0xfffffc00) >> 10));
    tcg_out32(s, LDUW | INSN_RD(ret) | INSN_RS1(ret) |
              INSN_IMM13(arg & 0x3ff));
}
263

    
264
static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
265
                                  tcg_target_long arg)
266
{
267
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
268
    if (!check_fit(arg, 32))
269
        fprintf(stderr, "unimplemented %s with offset %ld\n", __func__, arg);
270
    if (!check_fit(arg, 13))
271
        tcg_out32(s, SETHI | INSN_RD(ret) | (((uint32_t)arg & 0xfffffc00) >> 10));
272
    tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(ret) |
273
              INSN_IMM13(arg & 0x3ff));
274
#else
275
    tcg_out_ld_raw(s, ret, arg);
276
#endif
277
}
278

    
279
/* Emit load/store OP of register RET at address ADDR+OFFSET.  Offsets
   that do not fit a signed 13-bit immediate are materialized into
   TCG_REG_I5 (reserved as an internal scratch in tcg_target_init) and
   the register-indexed addressing form is used instead.  */
static inline void tcg_out_ldst(TCGContext *s, int ret, int addr, int offset, int op)
{
    if (check_fit(offset, 13))
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, offset);
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(TCG_REG_I5) |
                  INSN_RS2(addr));
    }
}
290

    
291
/* Load a value of the given TCG type from ARG1+ARG2 into register RET:
   32-bit loads use LDUW, anything else LDX.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    int op = (type == TCG_TYPE_I32) ? LDUW : LDX;

    tcg_out_ldst(s, ret, arg1, arg2, op);
}
299

    
300
/* Store register ARG of the given TCG type at ARG1+ARG2: 32-bit
   stores use STW, anything else STX.  */
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                              int arg1, tcg_target_long arg2)
{
    int op = (type == TCG_TYPE_I32) ? STW : STX;

    tcg_out_ldst(s, arg, arg1, arg2, op);
}
308

    
309
/* Three-register ALU operation: OP rs1, rs2, rd.  */
static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
                                 int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_RS2(rs2));
}
315

    
316
/* ALU operation with a signed 13-bit immediate: OP rs1, simm13, rd.  */
static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1, int offset,
                                  int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_IMM13(offset));
}
322

    
323
/* Write VAL to the %y register (used by 32-bit multiply/divide).
   Only 0 and -1 -- both of which fit the immediate form -- are
   supported; anything else is diagnosed.  */
static inline void tcg_out_sety(TCGContext *s, tcg_target_long val)
{
    if (val != 0 && val != -1) {
        fprintf(stderr, "unimplemented sety %ld\n", (long)val);
        return;
    }
    tcg_out32(s, WRY | INSN_IMM13(val));
}
330

    
331
/* Add constant VAL to register REG in place.  Adding zero emits
   nothing; values outside the signed 13-bit immediate range are only
   diagnosed.  */
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val == 0) {
        return;
    }
    if (check_fit(val, 13)) {
        tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
    } else {
        fprintf(stderr, "unimplemented addi %ld\n", (long)val);
    }
}
340

    
341
/* Emit "sethi 0, %g0" -- the canonical SPARC nop encoding.  */
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, SETHI | INSN_RD(TCG_REG_G0) | 0);
}
345

    
346
/* Emit a conditional branch (Bicc) to LABEL_INDEX.  OPC is a raw
   condition code (COND_*, as produced by tcg_cond_to_bcond); forward
   branches to labels without a value yet are not implemented.  The
   caller is responsible for filling the delay slot.  */
static void tcg_out_branch(TCGContext *s, int opc, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        /* Byte displacement from the branch to the label; computed
           once instead of the old dead local plus inline recompute.  */
        int32_t val = l->u.value - (tcg_target_long)s->code_ptr;

        /* Bicc format: op=0, cond in bits 28:25, op2=2, 22-bit word
           displacement.  The condition must be positioned with
           INSN_COND -- the previous code or'ed the raw COND_* value in
           unshifted, corrupting the displacement field.  */
        tcg_out32(s, (INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x2)
                      | INSN_OFF22(val)));
    } else {
        /* TODO: record a relocation so forward branches work.  */
        fprintf(stderr, "unimplemented branch\n");
    }
}
358

    
359
/* Map TCG comparison conditions to SPARC integer condition codes for
   use after a flag-setting compare.  Unsigned comparisons use the
   carry forms (LTU -> carry set, GEU -> carry clear).  */
static const uint8_t tcg_cond_to_bcond[10] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};
371

    
372
/* Compare ARG1 against ARG2 (or test ARG1 against zero when ARG2 is
   the constant 0), then branch on COND to LABEL_INDEX with a nop in
   the delay slot.  */
static void tcg_out_brcond(TCGContext *s, int cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           int label_index)
{
    if (const_arg2 && arg2 == 0)
        /* andcc r, r, %g0 -- register form.  The previous code called
           tcg_out_arithi here, which encoded ARG1's register *number*
           as a 13-bit immediate and therefore tested "arg1 & regno"
           instead of the register's value.  */
        tcg_out_arith(s, TCG_REG_G0, arg1, arg1, ARITH_ANDCC);
    else
        /* subcc r1, r2, %g0 */
        tcg_out_arith(s, TCG_REG_G0, arg1, arg2, ARITH_SUBCC);
    tcg_out_branch(s, tcg_cond_to_bcond[cond], label_index);
    tcg_out_nop(s);
}
385

    
386
/* Generate global QEMU prologue and epilogue code.
   Prologue only: allocate a minimal register-window frame with SAVE,
   then jump to the translated-code pointer passed as the first
   argument (visible as %i0 after the SAVE), with a nop in the delay
   slot.  The matching epilogue (jmpl %i7+8; restore) is emitted per
   TB by INDEX_op_exit_tb in tcg_out_op.  */
void tcg_target_qemu_prologue(TCGContext *s)
{
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-TCG_TARGET_STACK_MINFRAME));
    tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I0) |
              INSN_RS2(TCG_REG_G0));
    tcg_out_nop(s);
}
395

    
396
/* Emit host code for one TCG opcode OPC with operands ARGS; each
   CONST_ARGS[i] flags whether ARGS[i] is a constant.  Many opcodes
   funnel into the shared gen_arith32 tail with the instruction
   pattern preselected in C.  */
static inline void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
                              const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Return value in %i0, then "jmpl %i7+8, %g0" (ret) with
           "restore" in the delay slot to unwind the prologue's SAVE.  */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
        tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
                  INSN_IMM13(8));
        tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
                      INSN_RS2(TCG_REG_G0));
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            /* NOTE(review): sethi keeps bits 31:13 but the jmpl imm13
               is sign-extended, so a set bit 12 in args[0] would be
               subtracted rather than added -- confirm intended.  */
            tcg_out32(s, SETHI | INSN_RD(TCG_REG_I5) |
                      ((args[0] & 0xffffe000) >> 10));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
                      INSN_IMM13((args[0] & 0x1fff)));
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_I5, (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
                      INSN_RS2(TCG_REG_G0));
        }
        tcg_out_nop(s);   /* branch delay slot */
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            /* Direct call: 30-bit pc-relative word displacement.  */
            tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
                                  - (tcg_target_ulong)s->code_ptr) >> 2)
                                 & 0x3fffffff));
            tcg_out_nop(s);
        } else {
            /* Indirect call via jmpl, link in %o7.
               NOTE(review): loads from s->tb_next + args[0], the same
               expression as goto_tb above -- looks copy-pasted; the
               call target register would be expected here.  Confirm.  */
            tcg_out_ld_ptr(s, TCG_REG_I5, (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_I5) |
                      INSN_RS2(TCG_REG_G0));
            tcg_out_nop(s);
        }
        break;
    case INDEX_op_jmp:
        fprintf(stderr, "unimplemented jmp\n");
        break;
    case INDEX_op_br:
        fprintf(stderr, "unimplemented br\n");
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

/* On a pure 64-bit host the same code pattern serves both the _i32 and
   _i64 variant of an opcode; elsewhere only _i32 exists.  */
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32:)    \
        glue(glue(case INDEX_op_, x), _i64:)
#else
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32:)
#endif
        OP_32_64(ld8u);
        tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
        break;
        OP_32_64(ld8s);
        tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
        break;
        OP_32_64(ld16u);
        tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
        break;
        OP_32_64(ld16s);
        tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
        break;
    case INDEX_op_ld_i32:
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    case INDEX_op_ld32u_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
        break;
        OP_32_64(st8);
        tcg_out_ldst(s, args[0], args[1], args[2], STB);
        break;
        OP_32_64(st16);
        tcg_out_ldst(s, args[0], args[1], args[2], STH);
        break;
    case INDEX_op_st_i32:
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    case INDEX_op_st32_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], STW);
        break;
        OP_32_64(add);
        c = ARITH_ADD;
        goto gen_arith32;
        OP_32_64(sub);
        c = ARITH_SUB;
        goto gen_arith32;
        OP_32_64(and);
        c = ARITH_AND;
        goto gen_arith32;
        OP_32_64(or);
        c = ARITH_OR;
        goto gen_arith32;
        OP_32_64(xor);
        c = ARITH_XOR;
        goto gen_arith32;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
        goto gen_arith32;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto gen_arith32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto gen_arith32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith32;
    case INDEX_op_div2_i32:
#if defined(__sparc_v9__) || defined(__sparc_v8plus__)
        c = ARITH_SDIVX;
        goto gen_arith32;
#else
        /* 32-bit divide needs %y as the high half of the dividend.  */
        tcg_out_sety(s, 0);
        c = ARITH_SDIV;
        goto gen_arith32;
#endif
    case INDEX_op_divu2_i32:
#if defined(__sparc_v9__) || defined(__sparc_v8plus__)
        c = ARITH_UDIVX;
        goto gen_arith32;
#else
        tcg_out_sety(s, 0);
        c = ARITH_UDIV;
        goto gen_arith32;
#endif

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       args[3]);
        break;

    /* Guest memory accesses: not implemented yet.  */
    case INDEX_op_qemu_ld8u:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_ld8s:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_ld16u:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_ld16s:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_ld32u:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_ld32s:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_st8:
        fprintf(stderr, "unimplemented qst\n");
        break;
    case INDEX_op_qemu_st16:
        fprintf(stderr, "unimplemented qst\n");
        break;
    case INDEX_op_qemu_st32:
        fprintf(stderr, "unimplemented qst\n");
        break;

#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
        goto gen_arith32;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto gen_arith32;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto gen_arith32;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith32;
    case INDEX_op_div2_i64:
        c = ARITH_SDIVX;
        goto gen_arith32;
    case INDEX_op_divu2_i64:
        c = ARITH_UDIVX;
        goto gen_arith32;

    case INDEX_op_brcond_i64:
        fprintf(stderr, "unimplemented brcond\n");
        break;
    case INDEX_op_qemu_ld64:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_st64:
        fprintf(stderr, "unimplemented qst\n");
        break;

#endif
    /* Shared tail: emit the ALU/shift op selected in C, using the
       immediate form when operand 2 is constant.  */
    gen_arith32:
        if (const_args[2]) {
            tcg_out_arithi(s, args[0], args[1], args[2], c);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], c);
        }
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}
622

    
623
/* Operand constraints per supported opcode.  Letters: "r" any
   register, "L" register usable in qemu_ld/st, "i" any immediate,
   "J" signed 13-bit immediate, digits alias an earlier operand
   (see target_parse_constraint).  Terminated by { -1 }.  */
static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "rJ" } },
    { INDEX_op_mul_i32, { "r", "r", "rJ" } },
    { INDEX_op_div2_i32, { "r", "r", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "r", "r", "0", "1", "r" } },
    { INDEX_op_sub_i32, { "r", "r", "rJ" } },
    { INDEX_op_and_i32, { "r", "r", "rJ" } },
    { INDEX_op_or_i32, { "r", "r", "rJ" } },
    { INDEX_op_xor_i32, { "r", "r", "rJ" } },

    { INDEX_op_shl_i32, { "r", "r", "rJ" } },
    { INDEX_op_shr_i32, { "r", "r", "rJ" } },
    { INDEX_op_sar_i32, { "r", "r", "rJ" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },

#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "rJ" } },
    { INDEX_op_mul_i64, { "r", "r", "rJ" } },
    { INDEX_op_div2_i64, { "r", "r", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "r", "r", "0", "1", "r" } },
    { INDEX_op_sub_i64, { "r", "r", "rJ" } },
    { INDEX_op_and_i64, { "r", "r", "rJ" } },
    { INDEX_op_or_i64, { "r", "r", "rJ" } },
    { INDEX_op_xor_i64, { "r", "r", "rJ" } },

    { INDEX_op_shl_i64, { "r", "r", "rJ" } },
    { INDEX_op_shr_i64, { "r", "r", "rJ" } },
    { INDEX_op_sar_i64, { "r", "r", "rJ" } },

    { INDEX_op_brcond_i64, { "r", "ri" } },
#endif
    { -1 },
};
699

    
700
/* One-time backend initialization: declare which registers exist,
   which are clobbered by calls, and which are reserved.  */
void tcg_target_init(TCGContext *s)
{
    /* All 32 integer registers can hold 32-bit values; on a pure v9
       host they can hold 64-bit values too.  */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
#endif
    /* Call-clobbered set: the globals and the out registers, which
       are not preserved across a call.  */
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_G1) |
                     (1 << TCG_REG_G2) |
                     (1 << TCG_REG_G3) |
                     (1 << TCG_REG_G4) |
                     (1 << TCG_REG_G5) |
                     (1 << TCG_REG_G6) |
                     (1 << TCG_REG_G7) |
                     (1 << TCG_REG_O0) |
                     (1 << TCG_REG_O1) |
                     (1 << TCG_REG_O2) |
                     (1 << TCG_REG_O3) |
                     (1 << TCG_REG_O4) |
                     (1 << TCG_REG_O5) |
                     (1 << TCG_REG_O7));

    /* Reserved: %g0 is hard-wired to zero; %i5 is the backend's
       scratch register (see tcg_out_ldst); %i6/%o6 are the frame and
       stack pointers; %i7/%o7 hold return addresses.  */
    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I5); // for internal use
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O7);
    tcg_add_target_add_op_defs(sparc_op_defs);
}