/* tcg/hppa/tcg-target.c @ revision 739734cb */
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
    "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
};
#endif

/* This is an 8 byte temp slot in the stack frame.  */
#define STACK_TEMP_OFS -16

#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R16
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif

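/* The call-saved registers are allocated first, so that TCG temporaries
   are more likely to survive helper calls; the call-clobbered argument
   and return registers come last.  */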
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,

    TCG_REG_R17,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,

    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,

    TCG_REG_RET0,
    TCG_REG_RET1,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,
};

static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_RET0,
    TCG_REG_RET1,
};

/* True iff val fits a signed field of width BITS.  */
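/* E.g. check_fit_tl(1023, 11) is true (the signed 11-bit range is
   -1024..1023), while check_fit_tl(1024, 11) is false.  */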
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << ((sizeof(tcg_target_long) * 8 - bits))
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}

/* True iff depi can be used to compute (reg | MASK).
   Accept a bit pattern like:
      0....01....1
      1....10....0
      0..01..10..0
   Copied from gcc sources.  */
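/* The test below works because adding the lowest set bit to MASK clears
   its single contiguous run of ones; the sum is then zero or a lone bit
   exactly when MASK was one such run (possibly wrapping past the MSB).
   E.g. 0x0ff0 + 0x0010 = 0x1000, a power of two, so 0x0ff0 is accepted.  */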
static inline int or_mask_p(tcg_target_ulong mask)
{
    if (mask == 0 || mask == -1) {
        return 0;
    }
    mask += mask & -mask;
    return (mask & (mask - 1)) == 0;
}

/* True iff depi or extru can be used to compute (reg & mask).
   Accept a bit pattern like these:
      0....01....1
      1....10....0
      1..10..01..1
   Copied from gcc sources.  */
static inline int and_mask_p(tcg_target_ulong mask)
{
    return or_mask_p(~mask);
}

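/* Encode VAL in the PA-RISC "low sign extension" immediate format, in
   which the sign bit is stored in the least significant bit of the
   field and the magnitude bits sit above it.  */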
static int low_sign_ext(int val, int len)
{
    return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
}

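/* The reassemble_N routines scatter the bits of an N-bit immediate into
   the permuted positions in which PA-RISC branch and LDIL instruction
   words store them.  */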
static int reassemble_12(int as12)
{
    return (((as12 & 0x800) >> 11) |
            ((as12 & 0x400) >> 8) |
            ((as12 & 0x3ff) << 3));
}

static int reassemble_17(int as17)
{
    return (((as17 & 0x10000) >> 16) |
            ((as17 & 0x0f800) << 5) |
            ((as17 & 0x00400) >> 8) |
            ((as17 & 0x003ff) << 3));
}

static int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}

/* ??? Bizarrely, there is no PCREL12F relocation type.  I guess all
   such relocations are simply fully handled by the assembler.  */
#define R_PARISC_PCREL12F  R_PARISC_NONE

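/* PA-RISC branch displacements are relative to the address of the branch
   plus 8 and are counted in 32-bit words, hence the "+ 8" and ">> 2"
   below.  */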
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    uint32_t *insn_ptr = (uint32_t *)code_ptr;
    uint32_t insn = *insn_ptr;
    tcg_target_long pcrel;

    value += addend;
    pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;

    switch (type) {
    case R_PARISC_PCREL12F:
        assert(check_fit_tl(pcrel, 12));
        /* ??? We assume all patches are forward.  See tcg_out_brcond
           re setting the NUL bit on the branch and eliding the nop.  */
        assert(pcrel >= 0);
        insn &= ~0x1ffdu;
        insn |= reassemble_12(pcrel);
        break;
    case R_PARISC_PCREL17F:
        assert(check_fit_tl(pcrel, 17));
        insn &= ~0x1f1ffdu;
        insn |= reassemble_17(pcrel);
        break;
    default:
        tcg_abort();
    }

    *insn_ptr = insn;
}

/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_0;
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S5;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MS11;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_AND;
        break;
    case 'O':
        ct->ct |= TCG_CT_CONST_OR;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if (ct & TCG_CT_CONST_0) {
        return val == 0;
    } else if (ct & TCG_CT_CONST_S5) {
        return check_fit_tl(val, 5);
    } else if (ct & TCG_CT_CONST_S11) {
        return check_fit_tl(val, 11);
    } else if (ct & TCG_CT_CONST_MS11) {
        return check_fit_tl(-val, 11);
    } else if (ct & TCG_CT_CONST_AND) {
        return and_mask_p(val);
    } else if (ct & TCG_CT_CONST_OR) {
        return or_mask_p(val);
    }
    return 0;
}

#define INSN_OP(x)       ((x) << 26)
#define INSN_EXT3BR(x)   ((x) << 13)
#define INSN_EXT3SH(x)   ((x) << 10)
#define INSN_EXT4(x)     ((x) << 6)
#define INSN_EXT5(x)     (x)
#define INSN_EXT6(x)     ((x) << 6)
#define INSN_EXT7(x)     ((x) << 6)
#define INSN_EXT8A(x)    ((x) << 6)
#define INSN_EXT8B(x)    ((x) << 5)
#define INSN_T(x)        (x)
#define INSN_R1(x)       ((x) << 16)
#define INSN_R2(x)       ((x) << 21)
#define INSN_DEP_LEN(x)  (32 - (x))
#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
#define INSN_SHDEP_P(x)  ((x) << 5)
#define INSN_COND(x)     ((x) << 13)
#define INSN_IM11(x)     low_sign_ext(x, 11)
#define INSN_IM14(x)     low_sign_ext(x, 14)
#define INSN_IM5(x)      (low_sign_ext(x, 5) << 16)

#define COND_NEVER   0
#define COND_EQ      1
#define COND_LT      2
#define COND_LE      3
#define COND_LTU     4
#define COND_LEU     5
#define COND_SV      6
#define COND_OD      7
#define COND_FALSE   8

#define INSN_ADD        (INSN_OP(0x02) | INSN_EXT6(0x18))
#define INSN_ADDC       (INSN_OP(0x02) | INSN_EXT6(0x1c))
#define INSN_ADDI       (INSN_OP(0x2d))
#define INSN_ADDIL      (INSN_OP(0x0a))
#define INSN_ADDL       (INSN_OP(0x02) | INSN_EXT6(0x28))
#define INSN_AND        (INSN_OP(0x02) | INSN_EXT6(0x08))
#define INSN_ANDCM      (INSN_OP(0x02) | INSN_EXT6(0x00))
#define INSN_COMCLR     (INSN_OP(0x02) | INSN_EXT6(0x22))
#define INSN_COMICLR    (INSN_OP(0x24))
#define INSN_DEP        (INSN_OP(0x35) | INSN_EXT3SH(3))
#define INSN_DEPI       (INSN_OP(0x35) | INSN_EXT3SH(7))
#define INSN_EXTRS      (INSN_OP(0x34) | INSN_EXT3SH(7))
#define INSN_EXTRU      (INSN_OP(0x34) | INSN_EXT3SH(6))
#define INSN_LDIL       (INSN_OP(0x08))
#define INSN_LDO        (INSN_OP(0x0d))
#define INSN_MTCTL      (INSN_OP(0x00) | INSN_EXT8B(0xc2))
#define INSN_OR         (INSN_OP(0x02) | INSN_EXT6(0x09))
#define INSN_SHD        (INSN_OP(0x34) | INSN_EXT3SH(2))
#define INSN_SUB        (INSN_OP(0x02) | INSN_EXT6(0x10))
#define INSN_SUBB       (INSN_OP(0x02) | INSN_EXT6(0x14))
#define INSN_SUBI       (INSN_OP(0x25))
#define INSN_VEXTRS     (INSN_OP(0x34) | INSN_EXT3SH(5))
#define INSN_VEXTRU     (INSN_OP(0x34) | INSN_EXT3SH(4))
#define INSN_VSHD       (INSN_OP(0x34) | INSN_EXT3SH(0))
#define INSN_XOR        (INSN_OP(0x02) | INSN_EXT6(0x0a))
#define INSN_ZDEP       (INSN_OP(0x35) | INSN_EXT3SH(2))
#define INSN_ZVDEP      (INSN_OP(0x35) | INSN_EXT3SH(0))

#define INSN_BL         (INSN_OP(0x3a) | INSN_EXT3BR(0))
#define INSN_BL_N       (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
#define INSN_BLR        (INSN_OP(0x3a) | INSN_EXT3BR(2))
#define INSN_BV         (INSN_OP(0x3a) | INSN_EXT3BR(6))
#define INSN_BV_N       (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
#define INSN_BLE_SR4    (INSN_OP(0x39) | (1 << 13))

#define INSN_LDB        (INSN_OP(0x10))
#define INSN_LDH        (INSN_OP(0x11))
#define INSN_LDW        (INSN_OP(0x12))
#define INSN_LDWM       (INSN_OP(0x13))
#define INSN_FLDDS      (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))

#define INSN_LDBX       (INSN_OP(0x03) | INSN_EXT4(0))
#define INSN_LDHX       (INSN_OP(0x03) | INSN_EXT4(1))
#define INSN_LDWX       (INSN_OP(0x03) | INSN_EXT4(2))

#define INSN_STB        (INSN_OP(0x18))
#define INSN_STH        (INSN_OP(0x19))
#define INSN_STW        (INSN_OP(0x1a))
#define INSN_STWM       (INSN_OP(0x1b))
#define INSN_FSTDS      (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))

#define INSN_COMBT      (INSN_OP(0x20))
#define INSN_COMBF      (INSN_OP(0x22))
#define INSN_COMIBT     (INSN_OP(0x21))
#define INSN_COMIBF     (INSN_OP(0x23))

/* supplied by libgcc */
extern void *__canonicalize_funcptr_for_compare(void *);

static void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
       but hppa-dis.c is unaware of this definition */
    if (ret != arg) {
        tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
                  | INSN_R2(TCG_REG_R0));
    }
}

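/* Load a 32-bit constant: either a single LDO from %r0 when the value
   fits in 14 bits, or an LDIL/LDO pair that splits the value into its
   top 21 and low 11 bits.  */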
static void tcg_out_movi(TCGContext *s, TCGType type,
                         int ret, tcg_target_long arg)
{
    if (check_fit_tl(arg, 14)) {
        tcg_out32(s, INSN_LDO | INSN_R1(ret)
                  | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
    } else {
        uint32_t hi, lo;
        hi = arg >> 11;
        lo = arg & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
        if (lo) {
            tcg_out32(s, INSN_LDO | INSN_R1(ret)
                      | INSN_R2(ret) | INSN_IM14(lo));
        }
    }
}

static void tcg_out_ldst(TCGContext *s, int ret, int addr,
                         tcg_target_long offset, int op)
{
    if (!check_fit_tl(offset, 14)) {
        uint32_t hi, lo, op;

        hi = offset >> 11;
        lo = offset & 0x7ff;

        if (addr == TCG_REG_R0) {
            op = INSN_LDIL | INSN_R2(TCG_REG_R1);
        } else {
            op = INSN_ADDIL | INSN_R2(addr);
        }
        tcg_out32(s, op | reassemble_21(hi));

        addr = TCG_REG_R1;
        offset = lo;
    }

    if (ret != addr || offset != 0 || op != INSN_LDO) {
        tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
    }
}

/* This function is required by tcg.c.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
}

/* This function is required by tcg.c.  */
static inline void tcg_out_st(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
}

static void tcg_out_ldst_index(TCGContext *s, int data,
                               int base, int index, int op)
{
    tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
}

static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
                                 tcg_target_long val)
{
    tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
}

/* This function is required by tcg.c.  */
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    tcg_out_addi2(s, reg, reg, val);
}

static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
{
    tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
}

static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
                                  tcg_target_long val, int op)
{
    assert(check_fit_tl(val, 11));
    tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
}

static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
{
    tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
}

/* Extract LEN bits at position OFS from ARG and place in RET.
   Note that here the bit ordering is reversed from the PA-RISC
   standard, such that the right-most bit is 0.  */
static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
                                unsigned ofs, unsigned len, int sign)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
              | INSN_R1(ret) | INSN_R2(arg)
              | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
}

/* Likewise with OFS interpreted little-endian.  */
static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
                               unsigned ofs, unsigned len)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
}

static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
                               unsigned count)
{
    assert(count < 32);
    tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
              | INSN_SHDEP_CP(count));
}

static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
{
    tcg_out_mtctl_sar(s, creg);
    tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
}

static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int bs0, bs1;

    /* Note that the argument is constrained to match or_mask_p.  */
    for (bs0 = 0; bs0 < 32; bs0++) {
        if ((m & (1u << bs0)) != 0) {
            break;
        }
    }
    for (bs1 = bs0; bs1 < 32; bs1++) {
        if ((m & (1u << bs1)) == 0) {
            break;
        }
    }
    assert(bs1 == 32 || (1ul << bs1) > m);

    tcg_out_mov(s, ret, arg);
    tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(-1)
              | INSN_SHDEP_CP(31 - bs0) | INSN_DEP_LEN(bs1 - bs0));
}

static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int ls0, ls1, ms0;

    /* Note that the argument is constrained to match and_mask_p.  */
    for (ls0 = 0; ls0 < 32; ls0++) {
        if ((m & (1u << ls0)) == 0) {
            break;
        }
    }
    for (ls1 = ls0; ls1 < 32; ls1++) {
        if ((m & (1u << ls1)) != 0) {
            break;
        }
    }
    for (ms0 = ls1; ms0 < 32; ms0++) {
        if ((m & (1u << ms0)) == 0) {
            break;
        }
    }
    assert(ms0 == 32);

    if (ls1 == 32) {
        tcg_out_extr(s, ret, arg, 0, ls0, 0);
    } else {
        tcg_out_mov(s, ret, arg);
        tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(0)
                  | INSN_SHDEP_CP(31 - ls0) | INSN_DEP_LEN(ls1 - ls0));
    }
}

static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 8, 1);
}

static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 16, 1);
}

static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
}

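/* Variable shifts and rotates go through the SAR control register:
   ZVDEP, VEXTRS and VSHD take their shift amount from SAR, which uses
   the PA-RISC big-endian bit numbering, hence the 31 - count and
   32 - count adjustments in the routines below.  */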
static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 0);
}

static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
}

static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 1);
}

static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, 32 - count);
}

static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
    tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
}

static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, count);
}

static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, arg, arg, creg);
}

static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
{
    if (ret != arg) {
        tcg_out_mov(s, ret, arg);             /* arg =  xxAB */
    }
    tcg_out_dep(s, ret, ret, 16, 8);          /* ret =  xBAB */
    tcg_out_extr(s, ret, ret, 8, 16, sign);   /* ret =  ..BA */
}

static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
{
                                          /* arg =  ABCD */
    tcg_out_rotri(s, temp, arg, 16);      /* temp = CDAB */
    tcg_out_dep(s, temp, temp, 16, 8);    /* temp = CBAB */
    tcg_out_shd(s, ret, arg, temp, 8);    /* ret =  DCBA */
}

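/* Emit a call.  A 17-bit displacement reaches most helpers directly
   via BL; otherwise build the target address in %r20 and use BLE,
   which leaves the return address in %r31, from where it is copied
   back into %rp.  */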
static void tcg_out_call(TCGContext *s, void *func)
{
    tcg_target_long val, hi, lo, disp;

    val = (uint32_t)__canonicalize_funcptr_for_compare(func);
    disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;

    if (check_fit_tl(disp, 17)) {
        tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
    } else {
        hi = val >> 11;
        lo = val & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
        tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
                  | reassemble_17(lo >> 2));
        tcg_out_mov(s, TCG_REG_RP, TCG_REG_R31);
    }
}

static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
                          int arg1, int arg2)
{
    /* Store both words into the stack for copy to the FPU.  */
    tcg_out_ldst(s, arg1, TCG_REG_SP, STACK_TEMP_OFS, INSN_STW);
    tcg_out_ldst(s, arg2, TCG_REG_SP, STACK_TEMP_OFS + 4, INSN_STW);

    /* Load both words into the FPU at the same time.  We get away
       with this because we can address the left and right half of the
       FPU registers individually once loaded.  */
    /* fldds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_SP)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* xmpyu fr22r,fr22,fr22 */
    tcg_out32(s, 0x3ad64796);

    /* Store the 64-bit result back into the stack.  */
    /* fstds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_SP)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* Load the pieces of the result that the caller requested.  */
    if (reth) {
        tcg_out_ldst(s, reth, TCG_REG_SP, STACK_TEMP_OFS, INSN_LDW);
    }
    if (retl) {
        tcg_out_ldst(s, retl, TCG_REG_SP, STACK_TEMP_OFS + 4, INSN_LDW);
    }
}

static void tcg_out_add2(TCGContext *s, int destl, int desth,
                         int al, int ah, int bl, int bh, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (blconst) {
        tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_ADD);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_ADDC);

    tcg_out_mov(s, destl, tmp);
}

static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
                         int bl, int bh, int alconst, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (alconst) {
        if (blconst) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
            bl = TCG_REG_R20;
        }
        tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
    } else if (blconst) {
        tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_SUB);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_SUBB);

    tcg_out_mov(s, destl, tmp);
}

static void tcg_out_branch(TCGContext *s, int label_index, int nul)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t op = nul ? INSN_BL_N : INSN_BL;

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 17));

        tcg_out32(s, op | reassemble_17(val));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
        tcg_out32(s, op);
    }
}

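/* Map TCG conditions onto PA-RISC compare conditions.  Only half of the
   TCG conditions are directly encodable; the rest are expressed as the
   complement of an encodable one, flagged with COND_FALSE, which makes
   the emitters below select the "on false" instruction forms.  */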
static const uint8_t tcg_cond_to_cmp_cond[10] =
{
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_EQ | COND_FALSE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_LT | COND_FALSE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_LE | COND_FALSE,
    [TCG_COND_LTU] = COND_LTU,
    [TCG_COND_GEU] = COND_LTU | COND_FALSE,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_LEU | COND_FALSE,
};

static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
                           TCGArg c2, int c2const, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    int op, pacond;

    /* Note that COMIB operates as if the immediate is the first
       operand.  We model brcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMB with reversed operands as well.  */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
        op |= INSN_IM5(c2);
    } else {
        op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
        op |= INSN_R1(c2);
    }
    op |= INSN_R2(c1);
    op |= INSN_COND(pacond & 7);

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 12));

        /* ??? Assume that all branches to defined labels are backward.
           Which means that if the nul bit is set, the delay slot is
           executed if the branch is taken, and not executed in fallthru.  */
        tcg_out32(s, op | reassemble_12(val));
        tcg_out_nop(s);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
        /* ??? Assume that all branches to undefined labels are forward.
           Which means that if the nul bit is set, the delay slot is
           not executed if the branch is taken, which is what we want.  */
        tcg_out32(s, op | 2);
    }
}

static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
                           TCGArg c1, TCGArg c2, int c2const)
{
    int op, pacond;

    /* Note that COMICLR operates as if the immediate is the first
       operand.  We model setcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMCLR with reversed operands as well.  */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
    } else {
        op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
    }
    op |= INSN_COND(pacond & 7);
    op |= pacond & COND_FALSE ? 1 << 12 : 0;

    tcg_out32(s, op);
}

static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
                            TCGArg bl, int blconst, TCGArg bh, int bhconst,
                            int label_index)
{
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, al, bl, blconst);
        tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
        break;

    default:
        tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
        tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_brcond(s, tcg_unsigned_cond(cond),
                       al, bl, blconst, label_index);
        break;
    }
}

static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
                            TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
    tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
}

static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
                             TCGArg al, TCGArg ah, TCGArg bl, int blconst,
                             TCGArg bh, int bhconst)
{
    int scratch = TCG_REG_R20;

    if (ret != al && ret != ah
        && (blconst || ret != bl)
        && (bhconst || ret != bh)) {
        scratch = ret;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_setcond(s, cond, scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
        break;

    default:
        tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
        tcg_out_comclr(s, cond, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
        break;
    }

    tcg_out_mov(s, ret, scratch);
}

#if defined(CONFIG_SOFTMMU)
#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};

/* Load and compare a TLB entry, and branch if TLB miss.  OFFSET is set to
   the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
   TLB for the memory index.  The return value is the offset from ENV
   contained in R1 afterward (to be used when loading ADDEND); if the
   return value is 0, R1 is not used.  */

static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
                            int addrhi, int s_bits, int lab_miss, int offset)
{
    int ret;

    /* Extracting the index into the TLB.  The "normal C operation" is
          r1 = addr_reg >> TARGET_PAGE_BITS;
          r1 &= CPU_TLB_SIZE - 1;
          r1 <<= CPU_TLB_ENTRY_BITS;
       What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
       and place them at CPU_TLB_ENTRY_BITS.  We can combine the first two
       operations with an EXTRU.  Unfortunately, the current value of
       CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
       add that follows.  */
    tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
    tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
    tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);

    /* Make sure that both the addr_{read,write} and addend can be
       read with a 14-bit offset from the same base register.  */
    if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
        ret = 0;
    } else {
        ret = (offset + 0x400) & ~0x7ff;
        offset = ret - offset;
        tcg_out_addi2(s, TCG_REG_R1, r1, ret);
        r1 = TCG_REG_R1;
    }

    /* Load the entry from the computed slot.  */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
    } else {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
    }

    /* Compute the value that ought to appear in the TLB for a hit,
       namely, the page of the address.  We include the low N bits of
       the address to catch unaligned accesses and force them onto the
       slow path.  Do this computation after having issued the load
       from the TLB slot to give the load time to complete.  */
    tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* If not equal, jump to lab_miss. */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
                        r0, 0, addrhi, 0, lab_miss);
    } else {
        tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
    }

    return ret;
}
#endif

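/* OPC encodes the memory access: bits 0-1 are log2 of the access size,
   and bit 2 requests sign extension of the loaded value.  */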
static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo_reg, int datahi_reg,
                                   int addr_reg, int addend_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        break;
    case 0 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        tcg_out_ext8s(s, datalo_reg, datalo_reg);
        break;
    case 1:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 0);
        }
        break;
    case 1 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 1);
        } else {
            tcg_out_ext16s(s, datalo_reg, datalo_reg);
        }
        break;
    case 2:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDWX);
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
        }
        break;
    case 3:
        if (bswap) {
            int t = datahi_reg;
            datahi_reg = datalo_reg;
            datalo_reg = t;
        }
        /* We can't access the low-part with a reg+reg addressing mode,
           so perform the addition now and use reg_ofs addressing mode.  */
        if (addend_reg != TCG_REG_R0) {
            tcg_out_arith(s, TCG_REG_R20, addr_reg, addend_reg, INSN_ADD);
            addr_reg = TCG_REG_R20;
        }
        /* Make sure not to clobber the base register.  */
        if (datahi_reg == addr_reg) {
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
        } else {
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
        }
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, datahi_reg, datahi_reg, TCG_REG_R20);
        }
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit loads.  */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests.  */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argreg, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUState, tlb_table[mem_index][0].addr_read);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
                              opc & 3, lab1, offset);

    /* TLB Hit.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg, TCG_REG_R20, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);

    argreg = TCG_REG_R26;
    tcg_out_mov(s, argreg--, addrlo_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, argreg--, addrhi_reg);
    }
    tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);

    tcg_out_call(s, qemu_ld_helpers[opc & 3]);

    switch (opc) {
    case 0:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xff);
        break;
    case 0 | 4:
        tcg_out_ext8s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 1:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xffff);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 2:
    case 2 | 4:
        tcg_out_mov(s, datalo_reg, TCG_REG_RET0);
        break;
    case 3:
        tcg_out_mov(s, datahi_reg, TCG_REG_RET0);
        tcg_out_mov(s, datalo_reg, TCG_REG_RET1);
        break;
    default:
        tcg_abort();
    }

    /* label2: */
    tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
#else
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
                           (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0), opc);
#endif
}

static void tcg_out_qemu_st_direct(TCGContext *s, int datalo_reg, int datahi_reg,
                                   int addr_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STB);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, TCG_REG_R20, datalo_reg, 0);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STH);
        break;
    case 2:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STW);
        break;
    case 3:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, TCG_REG_R23, datahi_reg, TCG_REG_R23);
            datahi_reg = TCG_REG_R20;
            datalo_reg = TCG_REG_R23;
        }
        tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_STW);
        tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_STW);
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit stores.  */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests.  */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argreg, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUState, tlb_table[mem_index][0].addr_write);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
                              opc, lab1, offset);

    /* TLB Hit.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);

    /* There are no indexed stores, so we must do this addition explicitly.
       Careful to avoid R20, which is used for the bswaps to follow.  */
    tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_REG_R20, INSN_ADDL);
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, TCG_REG_R31, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);

    argreg = TCG_REG_R26;
    tcg_out_mov(s, argreg--, addrlo_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, argreg--, addrhi_reg);
    }

    switch (opc) {
    case 0:
        tcg_out_andi(s, argreg--, datalo_reg, 0xff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 1:
        tcg_out_andi(s, argreg--, datalo_reg, 0xffff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 2:
        tcg_out_mov(s, argreg--, datalo_reg);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 3:
        /* Because of the alignment required by the 64-bit data argument,
           we will always use R23/R24.  Also, we will always run out of
           argument registers for storing mem_index, so that will have
           to go on the stack.  */
        if (mem_index == 0) {
            argreg = TCG_REG_R0;
        } else {
            argreg = TCG_REG_R20;
            tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        }
        tcg_out_mov(s, TCG_REG_R23, datahi_reg);
        tcg_out_mov(s, TCG_REG_R24, datalo_reg);
        tcg_out_st(s, TCG_TYPE_I32, argreg, TCG_REG_SP,
                   TCG_TARGET_CALL_STACK_OFFSET - 4);
        break;
    default:
        tcg_abort();
    }

    tcg_out_call(s, qemu_st_helpers[opc]);

    /* label2: */
    tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
#else
    /* There are no indexed stores, so if GUEST_BASE is set we must do
       the add explicitly.  Careful to avoid R20, which is used for the
       bswaps to follow.  */
    if (GUEST_BASE != 0) {
        tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_GUEST_BASE_REG, INSN_ADDL);
        addrlo_reg = TCG_REG_R31;
    }
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, addrlo_reg, opc);
#endif
}

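/* Return to the epilogue through %r18, leaving the TB return value in
   %ret0.  The single instruction emitted after each BV executes in the
   branch delay slot, so the value is in place when the epilogue runs.  */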
static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
{
    if (!check_fit_tl(arg, 14)) {
        uint32_t hi, lo;
        hi = arg & ~0x7ff;
        lo = arg & 0x7ff;
        if (lo) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
            tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
            tcg_out_addi(s, TCG_REG_RET0, lo);
            return;
        }
        arg = hi;
    }
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
}

static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
{
    if (s->tb_jmp_offset) {
        /* direct jump method */
        fprintf(stderr, "goto_tb direct\n");
        tcg_abort();
    } else {
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
                   (tcg_target_long)(s->tb_next + arg));
        tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
    }
    s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
}

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_exit_tb(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        tcg_out_goto_tb(s, args[0]);
        break;

    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_call(s, (void *)args[0]);
        } else {
            /* ??? FIXME: the value in the register in args[0] is almost
               certainly a procedure descriptor, not a code address.  We
               probably need to use the millicode $$dyncall routine.  */
            tcg_abort();
        }
        break;

    case INDEX_op_jmp:
        fprintf(stderr, "unimplemented jmp\n");
        tcg_abort();
        break;

    case INDEX_op_br:
        tcg_out_branch(s, args[0], 1);
        break;

    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        tcg_out_ext16s(s, args[0], args[0]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
        break;

    case INDEX_op_st8_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
        break;
    case INDEX_op_st16_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
        break;
    case INDEX_op_st_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
        break;

    case INDEX_op_add_i32:
        if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
        }
        break;

    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
            } else {
                /* Recall that SUBI is a reversed subtract.  */
                tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
            }
        } else if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], -args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
        }
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
        }
        break;

    case INDEX_op_or_i32:
        if (const_args[2]) {
            tcg_out_ori(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
        }
        break;

    case INDEX_op_xor_i32:
        tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
        break;

    case INDEX_op_andc_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], ~args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
        }
        break;

    case INDEX_op_shl_i32:
        if (const_args[2]) {
            tcg_out_shli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_shr_i32:
        if (const_args[2]) {
            tcg_out_shri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out_sari(s, args[0], args[1], args[2]);
        } else {
            tcg_out_sar(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rotli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rotri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mul_i32:
        tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, args[0], args[1], 0);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
        break;

    case INDEX_op_not_i32:
        tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
        break;
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, args[0], args[1]);
        break;

    /* These three correspond exactly to the fallback implementation.
       But by including them we reduce the number of TCG ops that
       need to be generated, and these opcodes are fairly common.  */
    case INDEX_op_neg_i32:
        tcg_out_arith(s, args[0], TCG_REG_R0, args[1], INSN_SUB);
        break;
    case INDEX_op_ext8u_i32:
        tcg_out_andi(s, args[0], args[1], 0xff);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_andi(s, args[0], args[1], 0xffff);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], args[0], args[1],
                        args[2], const_args[2],
                        args[3], const_args[3], args[5]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
                         args[3], const_args[3], args[4], const_args[4]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_add2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[4]);
        break;

    case INDEX_op_sub2_i32:
        tcg_out_sub2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[2], const_args[4]);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}

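/* Constraint letters, as parsed by target_parse_constraint above:
   "Z" matches only the constant zero (i.e. %r0), "I" a signed 11-bit
   immediate, "J" a signed 5-bit immediate, "K" a constant whose
   negation fits in 11 bits, "M" an and_mask_p constant, "O" an
   or_mask_p constant, and "L" any register except the call argument
   registers R23-R26.  */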
static const TCGTargetOpDef hppa_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },

    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "r" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "rZ", "rM" } },
    { INDEX_op_or_i32, { "r", "rZ", "rO" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
    /* Note that the second argument will be inverted, which means
       we want a constant whose inversion matches M, and that O = ~M.
       See the implementation of and_mask_p.  */
    { INDEX_op_andc_i32, { "r", "rZ", "rO" } },

    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },

    { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
#endif
    { -1 },
};

static int tcg_target_callee_save_regs[] = {
    /* R2, the return address register, is saved specially
       in the caller's frame.  */
    /* R3, the frame pointer, is not currently modified.  */
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    /* R17 is the global env, so no need to save.  */
    TCG_REG_R18
};

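/* The prologue saves %rp and the callee-saved registers, optionally
   loads GUEST_BASE, then branches into the TB with BLE; the return
   address BLE leaves in %r31 is copied to %r18, which is where
   tcg_out_exit_tb returns to.  Execution then falls through into the
   epilogue, which restores the registers and returns.  */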
void tcg_target_qemu_prologue(TCGContext *s)
{
    int frame_size, i;

    /* Allocate space for the fixed frame marker.  */
    frame_size = -TCG_TARGET_CALL_STACK_OFFSET;
    frame_size += TCG_TARGET_STATIC_CALL_ARGS_SIZE;

    /* Allocate space for the saved registers.  */
    frame_size += ARRAY_SIZE(tcg_target_callee_save_regs) * 4;

    /* Align the allocated space.  */
    frame_size = ((frame_size + TCG_TARGET_STACK_ALIGN - 1)
                  & -TCG_TARGET_STACK_ALIGN);

    /* The return address is stored in the caller's frame.  */
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_SP, -20);

    /* Allocate stack frame, saving the first register at the same time.  */
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_SP, frame_size, INSN_STWM);

    /* Save all callee saved registers.  */
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, -frame_size + i * 4);
    }

#ifdef CONFIG_USE_GUEST_BASE
    /* Note that GUEST_BASE can change after the prologue is generated.
       To combat that, load the value from the variable instead of
       embedding a constant here.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
               TCG_REG_R0, (tcg_target_long)&guest_base);
#endif

    /* Jump to TB, and adjust R18 to be the return address.  */
    tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R26));
    tcg_out_mov(s, TCG_REG_R18, TCG_REG_R31);

    /* Restore callee saved registers.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_SP, -frame_size - 20);
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, -frame_size + i * 4);
    }

    /* Deallocate stack frame and return.  */
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_SP, -frame_size, INSN_LDWM);
}

void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);  /* hardwired to zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);  /* addil target */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP);  /* link register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3);  /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP);  /* data pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);  /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */
#ifdef CONFIG_USE_GUEST_BASE
    tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
#endif

    tcg_add_target_add_op_defs(hppa_op_defs);
}