Statistics
| Branch: | Revision:

root / tcg / sparc / tcg-target.c @ f02ca5cb

History | View | Annotate | Download (20.6 kB)

1
/*
2
 * Tiny Code Generator for QEMU
3
 *
4
 * Copyright (c) 2008 Fabrice Bellard
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7
 * of this software and associated documentation files (the "Software"), to deal
8
 * in the Software without restriction, including without limitation the rights
9
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
 * copies of the Software, and to permit persons to whom the Software is
11
 * furnished to do so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22
 * THE SOFTWARE.
23
 */
24

    
25
/* Printable names for each TCG register number, used by the TCG core
   for debug/disassembly output.  Order matches the SPARC register
   numbering: globals %g0-%g7, outs %o0-%o7, locals %l0-%l7,
   ins %i0-%i7.  */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
59

    
60
/* Preference order for the register allocator: the locals %l0-%l7
   first, then %i0-%i4.  (%i5-%i7 and the globals are excluded; %i5 is
   reserved for internal use and %i6/%i7 are frame/return registers —
   see tcg_target_init.)  */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,
    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
};
75

    
76
/* Registers used to pass the first six integer function arguments,
   per the SPARC calling convention (%o0-%o5 from the caller's side).  */
static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};
84

    
85
/* Registers holding a function call's (possibly 64-bit pair) return
   value as seen by the caller.  */
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_O0,
    TCG_REG_O1,
};
89

    
90
/* Apply relocation TYPE at CODE_PTR with the given VALUE + ADDEND.
   Only R_SPARC_32 (absolute 32-bit) is supported; anything else, or a
   value that does not fit in 32 bits, aborts.  */
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    if (type != R_SPARC_32) {
        tcg_abort();
    }
    if (value != (uint32_t)value) {
        tcg_abort();
    }
    *(uint32_t *)code_ptr = value;
}
104

    
105
/* Maximum number of registers used for input function arguments.
   Matches the six entries of tcg_target_call_iarg_regs (%o0-%o5).  */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    /* FLAGS is currently ignored; the count is fixed on SPARC.  */
    return 6;
}
110

    
111
/* parse target specific constraints */
112
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
113
{
114
    const char *ct_str;
115

    
116
    ct_str = *pct_str;
117
    switch (ct_str[0]) {
118
    case 'r':
119
    case 'L': /* qemu_ld/st constraint */
120
        ct->ct |= TCG_CT_REG;
121
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
122
        break;
123
    case 'I':
124
        ct->ct |= TCG_CT_CONST_S11;
125
        break;
126
    case 'J':
127
        ct->ct |= TCG_CT_CONST_S13;
128
        break;
129
    default:
130
        return -1;
131
    }
132
    ct_str++;
133
    *pct_str = ct_str;
134
    return 0;
135
}
136

    
137
/* Return non-zero if VAL is representable as a signed BITS-bit
   immediate (1 <= BITS <= width of tcg_target_long).

   The previous implementation sign-extended by shifting the signed
   value left then right; left-shifting a negative value (or shifting
   bits out of a signed type) is undefined behavior in C.  This version
   computes the same result as an explicit range check, building the
   bounds with unsigned arithmetic so no signed overflow can occur.  */
static inline int check_fit(tcg_target_long val, unsigned int bits)
{
    /* max = 2^(bits-1) - 1; the shift is done on an unsigned type so
       bits == type width is still well defined.  */
    tcg_target_long max = (tcg_target_long)(((tcg_target_ulong)1 << (bits - 1)) - 1);
    tcg_target_long min = -max - 1;

    return val >= min && val <= max;
}
142

    
143
/* test if a constant matches the constraint */
144
static inline int tcg_target_const_match(tcg_target_long val,
145
                                         const TCGArgConstraint *arg_ct)
146
{
147
    int ct;
148

    
149
    ct = arg_ct->ct;
150
    if (ct & TCG_CT_CONST)
151
        return 1;
152
    else if ((ct & TCG_CT_CONST_S11) && check_fit(val, 11))
153
        return 1;
154
    else if ((ct & TCG_CT_CONST_S13) && check_fit(val, 13))
155
        return 1;
156
    else
157
        return 0;
158
}
159

    
160
/* SPARC instruction field encoders: each macro places a value into its
   bit position within the 32-bit instruction word.  */
#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)

/* 13-bit signed immediate; bit 13 set selects the immediate form.  */
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
/* 22-bit branch displacement, in words (byte offset >> 2).  */
#define INSN_OFF22(x) (((x) >> 2) & 0x3fffff)

/* Branch condition field; A is the annul bit.  */
#define INSN_COND(x, a) (((x) << 25) | ((a) << 29))
#define COND_A     0x8    /* "always" condition */
#define BA         (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2))

/* Integer arithmetic opcodes (format 3, op = 2).  */
#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))

/* 32-bit shifts.  */
#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

/* 64-bit (extended) shifts: same op3, with bit 12 set.  */
#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

/* Control transfer, register window, and miscellaneous opcodes.  */
#define WRY        (INSN_OP(2) | INSN_OP3(0x30))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
/* Load/store opcodes (format 3, op = 3).  */
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
214

    
215
/* Register-to-register move: emit "or arg, %g0, ret".  */
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS1(arg) | INSN_RS2(TCG_REG_G0));
}
220

    
221
/* Load constant ARG into register RET.
   Small constants use a single "or %g0, imm13, ret"; larger 32-bit
   values use a sethi/or pair.  Constants that do not fit in 32 bits
   are not implemented on pure v9 builds and only print a warning.  */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                int ret, tcg_target_long arg)
{
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    if (!check_fit(arg, 32)) {
        fprintf(stderr, "unimplemented %s with constant %ld\n", __func__, arg);
    }
#endif
    if (check_fit(arg, 13)) {
        /* Fits in a simm13: one instruction.  */
        tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS1(TCG_REG_G0) |
                  INSN_IMM13(arg));
        return;
    }
    /* sethi %hi(arg), ret — sets the top 22 bits.  */
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
    if (arg & 0x3ff) {
        /* or ret, %lo(arg), ret — fills in the low 10 bits.  */
        tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS1(ret) |
                  INSN_IMM13(arg & 0x3ff));
    }
}
238

    
239
/* Load the 32-bit word at absolute address ARG into RET, using a
   sethi (high 22 bits) + lduw with the low 10 bits as offset.
   Assumes ARG fits in 32 bits.  */
static inline void tcg_out_ld_raw(TCGContext *s, int ret,
                                  tcg_target_long arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | (((uint32_t)arg & 0xfffffc00) >> 10));
    tcg_out32(s, LDUW | INSN_RD(ret) | INSN_RS1(ret) |
              INSN_IMM13(arg & 0x3ff));
}
246

    
247
static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
248
                                  tcg_target_long arg)
249
{
250
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
251
    if (!check_fit(arg, 32))
252
        fprintf(stderr, "unimplemented %s with offset %ld\n", __func__, arg);
253
    if (!check_fit(arg, 13))
254
        tcg_out32(s, SETHI | INSN_RD(ret) | (((uint32_t)arg & 0xfffffc00) >> 10));
255
    tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(ret) |
256
              INSN_IMM13(arg & 0x3ff));
257
#else
258
    tcg_out_ld_raw(s, ret, arg);
259
#endif
260
}
261

    
262
/* Emit load/store instruction OP for register RET with address
   [ADDR + OFFSET].  Offsets that do not fit in a simm13 are not
   implemented and only print a warning.  */
static inline void tcg_out_ldst(TCGContext *s, int ret, int addr, int offset, int op)
{
    if (!check_fit(offset, 13)) {
        fprintf(stderr, "unimplemented %s with offset %d\n", __func__, offset);
        return;
    }
    tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) | INSN_IMM13(offset));
}
270

    
271
/* Load a value of TYPE into RET from [ARG1 + ARG2]:
   lduw for 32-bit values, ldx otherwise.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, type == TCG_TYPE_I32 ? LDUW : LDX);
}
279

    
280
/* Store a value of TYPE from ARG to [ARG1 + ARG2]:
   st (32-bit) for TCG_TYPE_I32, stx otherwise.  */
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, type == TCG_TYPE_I32 ? STW : STX);
}
288

    
289
/* Emit the register-register form of arithmetic OP: rd = rs1 OP rs2.  */
static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
                                 int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}
295

    
296
/* Emit the register-immediate form of arithmetic OP:
   rd = rs1 OP simm13(offset).  */
static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1, int offset,
                                  int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}
302

    
303
/* Write VAL to the %y register (used by 32-bit multiply/divide).
   Only 0 and -1 are supported; other values print a warning.  */
static inline void tcg_out_sety(TCGContext *s, tcg_target_long val)
{
    if (val != 0 && val != -1) {
        fprintf(stderr, "unimplemented sety %ld\n", (long)val);
        return;
    }
    tcg_out32(s, WRY | INSN_IMM13(val));
}
310

    
311
/* Add constant VAL to register REG in place.  A zero VAL emits
   nothing; values outside simm13 range are unimplemented and only
   print a warning.  */
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val == 0) {
        return;
    }
    if (check_fit(val, 13)) {
        tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
    } else {
        fprintf(stderr, "unimplemented addi %ld\n", (long)val);
    }
}
320

    
321
/* Emit a nop, encoded as "sethi 0, %g0" (the canonical SPARC nop).  */
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, SETHI | INSN_RD(TCG_REG_G0) | 0);
}
325

    
326
/* Generate global QEMU prologue and epilogue code */
void tcg_target_qemu_prologue(TCGContext *s)
{
    /* save %sp, -TCG_TARGET_STACK_MINFRAME, %sp: open a new register
       window and reserve the minimal stack frame.  */
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-TCG_TARGET_STACK_MINFRAME));
    /* jmpl %o0, %g0 with a nop in the delay slot: jump to the
       translated-code address passed as first argument.
       NOTE(review): after SAVE, the caller's first argument has
       rotated from %o0 into %i0, yet the jump reads the new window's
       %o0 — verify this is intended.  */
    tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_O0) |
              INSN_RS2(TCG_REG_G0));
    tcg_out_nop(s);
}
335

    
336
/* Emit SPARC code for one TCG opcode.
   ARGS holds the operands; CONST_ARGS[i] non-zero means ARGS[i] is a
   constant rather than a register, per the constraints registered for
   the opcode.  Many opcodes (branches, qemu_ld/st) are still stubs
   that only print "unimplemented".  */
static inline void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
                              const int *const_args)
{
    int c;   /* arithmetic opcode selected by the gen_arith32 cases */

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Put the return value in %i0, then "jmpl %i7+8, %g0" (ret)
           with a restore in the delay slot to pop the register
           window.  */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
        tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
                  INSN_IMM13(8));
        tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
                      INSN_RS2(TCG_REG_G0));
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            /* NOTE(review): the range test uses 26 bits but BA's
               INSN_OFF22 encodes only a 22-bit word displacement —
               confirm the check is tight enough.  */
            if (check_fit(args[0] - (unsigned long)s->code_ptr, 26)) {
                tcg_out32(s, BA |
                          INSN_OFF22(args[0] - (unsigned long)s->code_ptr));
            } else {
                /* Out of branch range: load the target into the
                   reserved scratch %i5 and jump through it.  */
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, args[0]);
                tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
                          INSN_RS2(TCG_REG_G0));
            }
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_I5, (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
                      INSN_RS2(TCG_REG_G0));
        }
        /* Fill the branch delay slot.  */
        tcg_out_nop(s);
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            /* PC-relative CALL with a 30-bit word displacement.  */
            tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
                                  - (tcg_target_ulong)s->code_ptr) >> 2)
                                 & 0x3fffffff));
            tcg_out_nop(s);
        } else {
            /* Indirect call: "jmpl target, %o7" sets the return
               address in %o7.
               NOTE(review): the address is loaded from
               s->tb_next + args[0], same as goto_tb — verify this is
               the intended source for an indirect call target.  */
            tcg_out_ld_ptr(s, TCG_REG_O7, (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_O7) |
                      INSN_RS2(TCG_REG_G0));
            tcg_out_nop(s);
        }
        break;
    case INDEX_op_jmp:
        fprintf(stderr, "unimplemented jmp\n");
        break;
    case INDEX_op_br:
        fprintf(stderr, "unimplemented br\n");
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

/* On pure 64-bit v9 builds the _i32 and _i64 variants of an opcode
   share a case label; on 32-bit builds only _i32 exists.  */
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32:)    \
        glue(glue(case INDEX_op_, x), _i64:)
#else
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32:)
#endif
        OP_32_64(ld8u);
        tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
        break;
        OP_32_64(ld8s);
        tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
        break;
        OP_32_64(ld16u);
        tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
        break;
        OP_32_64(ld16s);
        tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
        break;
    case INDEX_op_ld_i32:
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    case INDEX_op_ld32u_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
        break;
        OP_32_64(st8);
        tcg_out_ldst(s, args[0], args[1], args[2], STB);
        break;
        OP_32_64(st16);
        tcg_out_ldst(s, args[0], args[1], args[2], STH);
        break;
    case INDEX_op_st_i32:
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    case INDEX_op_st32_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], STW);
        break;
        /* Two-operand arithmetic: select the opcode in c and share the
           emission code at gen_arith32 below.  */
        OP_32_64(add);
        c = ARITH_ADD;
        goto gen_arith32;
        OP_32_64(sub);
        c = ARITH_SUB;
        goto gen_arith32;
        OP_32_64(and);
        c = ARITH_AND;
        goto gen_arith32;
        OP_32_64(or);
        c = ARITH_OR;
        goto gen_arith32;
        OP_32_64(xor);
        c = ARITH_XOR;
        goto gen_arith32;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
        goto gen_arith32;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto gen_arith32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto gen_arith32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith32;
    case INDEX_op_div2_i32:
#if defined(__sparc_v9__) || defined(__sparc_v8plus__)
        c = ARITH_SDIVX;
        goto gen_arith32;
#else
        /* 32-bit sdiv uses %y as the high half of the dividend.  */
        tcg_out_sety(s, 0);
        c = ARITH_SDIV;
        goto gen_arith32;
#endif
    case INDEX_op_divu2_i32:
#if defined(__sparc_v9__) || defined(__sparc_v8plus__)
        c = ARITH_UDIVX;
        goto gen_arith32;
#else
        tcg_out_sety(s, 0);
        c = ARITH_UDIV;
        goto gen_arith32;
#endif

    case INDEX_op_brcond_i32:
        fprintf(stderr, "unimplemented brcond\n");
        break;

    case INDEX_op_qemu_ld8u:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_ld8s:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_ld16u:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_ld16s:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_ld32u:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_ld32s:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_st8:
        fprintf(stderr, "unimplemented qst\n");
        break;
    case INDEX_op_qemu_st16:
        fprintf(stderr, "unimplemented qst\n");
        break;
    case INDEX_op_qemu_st32:
        fprintf(stderr, "unimplemented qst\n");
        break;

/* 64-bit opcodes, only on pure v9 builds.  */
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
        goto gen_arith32;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto gen_arith32;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto gen_arith32;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith32;
    case INDEX_op_div2_i64:
        c = ARITH_SDIVX;
        goto gen_arith32;
    case INDEX_op_divu2_i64:
        c = ARITH_UDIVX;
        goto gen_arith32;

    case INDEX_op_brcond_i64:
        fprintf(stderr, "unimplemented brcond\n");
        break;
    case INDEX_op_qemu_ld64:
        fprintf(stderr, "unimplemented qld\n");
        break;
    case INDEX_op_qemu_st64:
        fprintf(stderr, "unimplemented qst\n");
        break;

#endif
    gen_arith32:
        /* Shared tail: args[0] = args[1] OP args[2], immediate form if
           operand 2 is constant.  */
        if (const_args[2]) {
            tcg_out_arithi(s, args[0], args[1], args[2], c);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], c);
        }
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}
565

    
566
/* Operand constraint table, one entry per supported opcode.
   Constraint letters: "r" = any register, "L" = register usable with
   qemu_ld/st, "i" = immediate, "J" = signed 13-bit constant,
   "0"/"1" = must alias that numbered operand (see
   target_parse_constraint for the target-specific letters).
   Terminated by { -1 }.  */
static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "rJ" } },
    { INDEX_op_mul_i32, { "r", "r", "rJ" } },
    { INDEX_op_div2_i32, { "r", "r", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "r", "r", "0", "1", "r" } },
    { INDEX_op_sub_i32, { "r", "r", "rJ" } },
    { INDEX_op_and_i32, { "r", "r", "rJ" } },
    { INDEX_op_or_i32, { "r", "r", "rJ" } },
    { INDEX_op_xor_i32, { "r", "r", "rJ" } },

    { INDEX_op_shl_i32, { "r", "r", "rJ" } },
    { INDEX_op_shr_i32, { "r", "r", "rJ" } },
    { INDEX_op_sar_i32, { "r", "r", "rJ" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },

/* 64-bit opcodes, only on pure v9 builds.  */
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "rJ" } },
    { INDEX_op_mul_i64, { "r", "r", "rJ" } },
    { INDEX_op_div2_i64, { "r", "r", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "r", "r", "0", "1", "r" } },
    { INDEX_op_sub_i64, { "r", "r", "rJ" } },
    { INDEX_op_and_i64, { "r", "r", "rJ" } },
    { INDEX_op_or_i64, { "r", "r", "rJ" } },
    { INDEX_op_xor_i64, { "r", "r", "rJ" } },

    { INDEX_op_shl_i64, { "r", "r", "rJ" } },
    { INDEX_op_shr_i64, { "r", "r", "rJ" } },
    { INDEX_op_sar_i64, { "r", "r", "rJ" } },

    { INDEX_op_brcond_i64, { "r", "ri" } },
#endif
    { -1 },
};
642

    
643
/* One-time target initialization: declare the registers available to
   the allocator, the call-clobbered set, the reserved registers, and
   register the opcode constraint table.  */
void tcg_target_init(TCGContext *s)
{
    /* All 32 registers can hold 32-bit values (and 64-bit values on
       pure v9 builds).  */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
#endif
    /* Registers not preserved across a call: the globals and the
       caller's out registers (except %o6, the stack pointer).  */
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_G1) |
                     (1 << TCG_REG_G2) |
                     (1 << TCG_REG_G3) |
                     (1 << TCG_REG_G4) |
                     (1 << TCG_REG_G5) |
                     (1 << TCG_REG_G6) |
                     (1 << TCG_REG_G7) |
                     (1 << TCG_REG_O0) |
                     (1 << TCG_REG_O1) |
                     (1 << TCG_REG_O2) |
                     (1 << TCG_REG_O3) |
                     (1 << TCG_REG_O4) |
                     (1 << TCG_REG_O5) |
                     (1 << TCG_REG_O7));

    /* Registers the allocator must never hand out: %g0 (hardwired
       zero), the internal scratch %i5 (used by tcg_out_op), the
       frame/return registers %i6/%i7, and the stack pointer %o6 plus
       call return address %o7.  */
    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I5); // for internal use
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O7);
    tcg_add_target_add_op_defs(sparc_op_defs);
}
}