Revision 48bb3750 tcg/s390/tcg-target.c

2 2
 * Tiny Code Generator for QEMU
3 3
 *
4 4
 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
5
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6
 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
5 7
 *
6 8
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 9
 * of this software and associated documentation files (the "Software"), to deal
......
22 24
 * THE SOFTWARE.
23 25
 */
24 26

  
27
/* ??? The translation blocks produced by TCG are generally small enough to
28
   be entirely reachable with a 16-bit displacement.  Leaving the option for
29
   a 32-bit displacement here Just In Case.  */
30
#define USE_LONG_BRANCHES 0
31

  
32
#define TCG_CT_CONST_32    0x0100
33
#define TCG_CT_CONST_NEG   0x0200
34
#define TCG_CT_CONST_ADDI  0x0400
35
#define TCG_CT_CONST_MULI  0x0800
36
#define TCG_CT_CONST_ANDI  0x1000
37
#define TCG_CT_CONST_ORI   0x2000
38
#define TCG_CT_CONST_XORI  0x4000
39
#define TCG_CT_CONST_CMPI  0x8000
40

  
41
/* In several places within the instruction set, 0 means "no register"
42
   rather than TCG_REG_R0.  */
43
#define TCG_REG_NONE    0
44

  
45
/* A scratch register that may be used throughout the backend.  */
46
#define TCG_TMP0        TCG_REG_R14
47

  
48
#ifdef CONFIG_USE_GUEST_BASE
49
#define TCG_GUEST_BASE_REG TCG_REG_R13
50
#else
51
#define TCG_GUEST_BASE_REG TCG_REG_R0
52
#endif
53

  
54
#ifndef GUEST_BASE
55
#define GUEST_BASE 0
56
#endif
57

  
58

  
59
/* All of the following instructions are prefixed with their instruction
60
   format, and are defined as 8- or 16-bit quantities, even when the two
61
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
62
   This makes it easy to copy the values from the tables in Appendix B.  */
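/* For example, RXY_LG == 0xe304 ends up as first byte 0xe3 and final byte
   0x04 of the 6-byte instruction; tcg_out_insn_RSY below splits the value
   as (op & 0xff00) and (op & 0xff).  */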
63
typedef enum S390Opcode {
64
    RIL_AFI     = 0xc209,
65
    RIL_AGFI    = 0xc208,
66
    RIL_ALGFI   = 0xc20a,
67
    RIL_BRASL   = 0xc005,
68
    RIL_BRCL    = 0xc004,
69
    RIL_CFI     = 0xc20d,
70
    RIL_CGFI    = 0xc20c,
71
    RIL_CLFI    = 0xc20f,
72
    RIL_CLGFI   = 0xc20e,
73
    RIL_IIHF    = 0xc008,
74
    RIL_IILF    = 0xc009,
75
    RIL_LARL    = 0xc000,
76
    RIL_LGFI    = 0xc001,
77
    RIL_LGRL    = 0xc408,
78
    RIL_LLIHF   = 0xc00e,
79
    RIL_LLILF   = 0xc00f,
80
    RIL_LRL     = 0xc40d,
81
    RIL_MSFI    = 0xc201,
82
    RIL_MSGFI   = 0xc200,
83
    RIL_NIHF    = 0xc00a,
84
    RIL_NILF    = 0xc00b,
85
    RIL_OIHF    = 0xc00c,
86
    RIL_OILF    = 0xc00d,
87
    RIL_XIHF    = 0xc006,
88
    RIL_XILF    = 0xc007,
89

  
90
    RI_AGHI     = 0xa70b,
91
    RI_AHI      = 0xa70a,
92
    RI_BRC      = 0xa704,
93
    RI_IIHH     = 0xa500,
94
    RI_IIHL     = 0xa501,
95
    RI_IILH     = 0xa502,
96
    RI_IILL     = 0xa503,
97
    RI_LGHI     = 0xa709,
98
    RI_LLIHH    = 0xa50c,
99
    RI_LLIHL    = 0xa50d,
100
    RI_LLILH    = 0xa50e,
101
    RI_LLILL    = 0xa50f,
102
    RI_MGHI     = 0xa70d,
103
    RI_MHI      = 0xa70c,
104
    RI_NIHH     = 0xa504,
105
    RI_NIHL     = 0xa505,
106
    RI_NILH     = 0xa506,
107
    RI_NILL     = 0xa507,
108
    RI_OIHH     = 0xa508,
109
    RI_OIHL     = 0xa509,
110
    RI_OILH     = 0xa50a,
111
    RI_OILL     = 0xa50b,
112

  
113
    RIE_CGIJ    = 0xec7c,
114
    RIE_CGRJ    = 0xec64,
115
    RIE_CIJ     = 0xec7e,
116
    RIE_CLGRJ   = 0xec65,
117
    RIE_CLIJ    = 0xec7f,
118
    RIE_CLGIJ   = 0xec7d,
119
    RIE_CLRJ    = 0xec77,
120
    RIE_CRJ     = 0xec76,
121

  
122
    RRE_AGR     = 0xb908,
123
    RRE_CGR     = 0xb920,
124
    RRE_CLGR    = 0xb921,
125
    RRE_DLGR    = 0xb987,
126
    RRE_DLR     = 0xb997,
127
    RRE_DSGFR   = 0xb91d,
128
    RRE_DSGR    = 0xb90d,
129
    RRE_LGBR    = 0xb906,
130
    RRE_LCGR    = 0xb903,
131
    RRE_LGFR    = 0xb914,
132
    RRE_LGHR    = 0xb907,
133
    RRE_LGR     = 0xb904,
134
    RRE_LLGCR   = 0xb984,
135
    RRE_LLGFR   = 0xb916,
136
    RRE_LLGHR   = 0xb985,
137
    RRE_LRVR    = 0xb91f,
138
    RRE_LRVGR   = 0xb90f,
139
    RRE_LTGR    = 0xb902,
140
    RRE_MSGR    = 0xb90c,
141
    RRE_MSR     = 0xb252,
142
    RRE_NGR     = 0xb980,
143
    RRE_OGR     = 0xb981,
144
    RRE_SGR     = 0xb909,
145
    RRE_XGR     = 0xb982,
146

  
147
    RR_AR       = 0x1a,
148
    RR_BASR     = 0x0d,
149
    RR_BCR      = 0x07,
150
    RR_CLR      = 0x15,
151
    RR_CR       = 0x19,
152
    RR_DR       = 0x1d,
153
    RR_LCR      = 0x13,
154
    RR_LR       = 0x18,
155
    RR_LTR      = 0x12,
156
    RR_NR       = 0x14,
157
    RR_OR       = 0x16,
158
    RR_SR       = 0x1b,
159
    RR_XR       = 0x17,
160

  
161
    RSY_RLL     = 0xeb1d,
162
    RSY_RLLG    = 0xeb1c,
163
    RSY_SLLG    = 0xeb0d,
164
    RSY_SRAG    = 0xeb0a,
165
    RSY_SRLG    = 0xeb0c,
166

  
167
    RS_SLL      = 0x89,
168
    RS_SRA      = 0x8a,
169
    RS_SRL      = 0x88,
170

  
171
    RXY_AG      = 0xe308,
172
    RXY_AY      = 0xe35a,
173
    RXY_CG      = 0xe320,
174
    RXY_CY      = 0xe359,
175
    RXY_LB      = 0xe376,
176
    RXY_LG      = 0xe304,
177
    RXY_LGB     = 0xe377,
178
    RXY_LGF     = 0xe314,
179
    RXY_LGH     = 0xe315,
180
    RXY_LHY     = 0xe378,
181
    RXY_LLGC    = 0xe390,
182
    RXY_LLGF    = 0xe316,
183
    RXY_LLGH    = 0xe391,
184
    RXY_LMG     = 0xeb04,
185
    RXY_LRV     = 0xe31e,
186
    RXY_LRVG    = 0xe30f,
187
    RXY_LRVH    = 0xe31f,
188
    RXY_LY      = 0xe358,
189
    RXY_STCY    = 0xe372,
190
    RXY_STG     = 0xe324,
191
    RXY_STHY    = 0xe370,
192
    RXY_STMG    = 0xeb24,
193
    RXY_STRV    = 0xe33e,
194
    RXY_STRVG   = 0xe32f,
195
    RXY_STRVH   = 0xe33f,
196
    RXY_STY     = 0xe350,
197

  
198
    RX_A        = 0x5a,
199
    RX_C        = 0x59,
200
    RX_L        = 0x58,
201
    RX_LH       = 0x48,
202
    RX_ST       = 0x50,
203
    RX_STC      = 0x42,
204
    RX_STH      = 0x40,
205
} S390Opcode;
206

  
207
#define LD_SIGNED      0x04
208
#define LD_UINT8       0x00
209
#define LD_INT8        (LD_UINT8 | LD_SIGNED)
210
#define LD_UINT16      0x01
211
#define LD_INT16       (LD_UINT16 | LD_SIGNED)
212
#define LD_UINT32      0x02
213
#define LD_INT32       (LD_UINT32 | LD_SIGNED)
214
#define LD_UINT64      0x03
215
#define LD_INT64       (LD_UINT64 | LD_SIGNED)
216

  
217
#ifndef NDEBUG
218
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
219
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
220
    "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15"
221
};
222
#endif
223

  
224
/* Since R6 is a potential argument register, choose it last of the
225
   call-saved registers.  Likewise prefer the call-clobbered registers
226
   in reverse order to maximize the chance of avoiding the arguments.  */
25 227
static const int tcg_target_reg_alloc_order[] = {
228
    TCG_REG_R13,
229
    TCG_REG_R12,
230
    TCG_REG_R11,
231
    TCG_REG_R10,
232
    TCG_REG_R9,
233
    TCG_REG_R8,
234
    TCG_REG_R7,
235
    TCG_REG_R6,
236
    TCG_REG_R14,
237
    TCG_REG_R0,
238
    TCG_REG_R1,
239
    TCG_REG_R5,
240
    TCG_REG_R4,
241
    TCG_REG_R3,
242
    TCG_REG_R2,
26 243
};
27 244

  
28 245
static const int tcg_target_call_iarg_regs[] = {
246
    TCG_REG_R2,
247
    TCG_REG_R3,
248
    TCG_REG_R4,
249
    TCG_REG_R5,
250
    TCG_REG_R6,
29 251
};
30 252

  
31 253
static const int tcg_target_call_oarg_regs[] = {
254
    TCG_REG_R2,
255
    TCG_REG_R3,
256
};
257

  
258
#define S390_CC_EQ      8
259
#define S390_CC_LT      4
260
#define S390_CC_GT      2
261
#define S390_CC_OV      1
262
#define S390_CC_NE      (S390_CC_LT | S390_CC_GT)
263
#define S390_CC_LE      (S390_CC_LT | S390_CC_EQ)
264
#define S390_CC_GE      (S390_CC_GT | S390_CC_EQ)
265
#define S390_CC_NEVER   0
266
#define S390_CC_ALWAYS  15
267

  
268
/* Condition codes that result from a COMPARE and COMPARE LOGICAL.  */
269
static const uint8_t tcg_cond_to_s390_cond[10] = {
270
    [TCG_COND_EQ]  = S390_CC_EQ,
271
    [TCG_COND_NE]  = S390_CC_NE,
272
    [TCG_COND_LT]  = S390_CC_LT,
273
    [TCG_COND_LE]  = S390_CC_LE,
274
    [TCG_COND_GT]  = S390_CC_GT,
275
    [TCG_COND_GE]  = S390_CC_GE,
276
    [TCG_COND_LTU] = S390_CC_LT,
277
    [TCG_COND_LEU] = S390_CC_LE,
278
    [TCG_COND_GTU] = S390_CC_GT,
279
    [TCG_COND_GEU] = S390_CC_GE,
280
};
281

  
282
/* Condition codes that result from a LOAD AND TEST.  Here, we have no
283
   unsigned instruction variant; however, since the test is against zero we
284
   can re-map the outcomes appropriately.  */
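/* Concretely: against zero, LTU can never be true and GEU is always true,
   while LEU degenerates to EQ and GTU to NE, which is what the table below
   encodes.  */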
285
static const uint8_t tcg_cond_to_ltr_cond[10] = {
286
    [TCG_COND_EQ]  = S390_CC_EQ,
287
    [TCG_COND_NE]  = S390_CC_NE,
288
    [TCG_COND_LT]  = S390_CC_LT,
289
    [TCG_COND_LE]  = S390_CC_LE,
290
    [TCG_COND_GT]  = S390_CC_GT,
291
    [TCG_COND_GE]  = S390_CC_GE,
292
    [TCG_COND_LTU] = S390_CC_NEVER,
293
    [TCG_COND_LEU] = S390_CC_EQ,
294
    [TCG_COND_GTU] = S390_CC_NE,
295
    [TCG_COND_GEU] = S390_CC_ALWAYS,
296
};
297

  
298
#ifdef CONFIG_SOFTMMU
299

  
300
#include "../../softmmu_defs.h"
301

  
302
static void *qemu_ld_helpers[4] = {
303
    __ldb_mmu,
304
    __ldw_mmu,
305
    __ldl_mmu,
306
    __ldq_mmu,
307
};
308

  
309
static void *qemu_st_helpers[4] = {
310
    __stb_mmu,
311
    __stw_mmu,
312
    __stl_mmu,
313
    __stq_mmu,
32 314
};
315
#endif
316

  
317
static uint8_t *tb_ret_addr;
318

  
319
/* A list of relevant facilities used by this translator.  Some of these
320
   are required for proper operation, and these are checked at startup.  */
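/* The facility list (as returned by STFLE) numbers bits from the
   most-significant end of the doubleword, hence the (63 - n) shifts
   below.  */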
321

  
322
#define FACILITY_ZARCH_ACTIVE	(1ULL << (63 - 2))
323
#define FACILITY_LONG_DISP	(1ULL << (63 - 18))
324
#define FACILITY_EXT_IMM	(1ULL << (63 - 21))
325
#define FACILITY_GEN_INST_EXT	(1ULL << (63 - 34))
326

  
327
static uint64_t facilities;
33 328

  
34 329
static void patch_reloc(uint8_t *code_ptr, int type,
35
                tcg_target_long value, tcg_target_long addend)
330
                        tcg_target_long value, tcg_target_long addend)
36 331
{
37
    tcg_abort();
332
    tcg_target_long code_ptr_tl = (tcg_target_long)code_ptr;
333
    tcg_target_long pcrel2;
334

  
335
    /* ??? Not the usual definition of "addend".  */
336
    pcrel2 = (value - (code_ptr_tl + addend)) >> 1;
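    /* Both R_390_PC16DBL and R_390_PC32DBL are expressed in halfwords
       (2-byte units), hence the shift by 1 above.  */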
337

  
338
    switch (type) {
339
    case R_390_PC16DBL:
340
        assert(pcrel2 == (int16_t)pcrel2);
341
        *(int16_t *)code_ptr = pcrel2;
342
        break;
343
    case R_390_PC32DBL:
344
        assert(pcrel2 == (int32_t)pcrel2);
345
        *(int32_t *)code_ptr = pcrel2;
346
        break;
347
    default:
348
        tcg_abort();
349
        break;
350
    }
38 351
}
39 352

  
40
static inline int tcg_target_get_call_iarg_regs_count(int flags)
353
static int tcg_target_get_call_iarg_regs_count(int flags)
41 354
{
42
    tcg_abort();
43
    return 0;
355
    return sizeof(tcg_target_call_iarg_regs) / sizeof(int);
44 356
}
45 357

  
46 358
/* parse target specific constraints */
47 359
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
48 360
{
49
    tcg_abort();
361
    const char *ct_str = *pct_str;
362

  
363
    switch (ct_str[0]) {
364
    case 'r':                  /* all registers */
365
        ct->ct |= TCG_CT_REG;
366
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
367
        break;
368
    case 'R':                  /* not R0 */
369
        ct->ct |= TCG_CT_REG;
370
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
371
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
372
        break;
373
    case 'L':                  /* qemu_ld/st constraint */
374
        ct->ct |= TCG_CT_REG;
375
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
376
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2);
377
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
378
        break;
379
    case 'a':                  /* force R2 for division */
380
        ct->ct |= TCG_CT_REG;
381
        tcg_regset_clear(ct->u.regs);
382
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
383
        break;
384
    case 'b':                  /* force R3 for division */
385
        ct->ct |= TCG_CT_REG;
386
        tcg_regset_clear(ct->u.regs);
387
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
388
        break;
389
    case 'N':                  /* force immediate negate */
390
        ct->ct |= TCG_CT_CONST_NEG;
391
        break;
392
    case 'W':                  /* force 32-bit ("word") immediate */
393
        ct->ct |= TCG_CT_CONST_32;
394
        break;
395
    case 'I':
396
        ct->ct |= TCG_CT_CONST_ADDI;
397
        break;
398
    case 'K':
399
        ct->ct |= TCG_CT_CONST_MULI;
400
        break;
401
    case 'A':
402
        ct->ct |= TCG_CT_CONST_ANDI;
403
        break;
404
    case 'O':
405
        ct->ct |= TCG_CT_CONST_ORI;
406
        break;
407
    case 'X':
408
        ct->ct |= TCG_CT_CONST_XORI;
409
        break;
410
    case 'C':
411
        ct->ct |= TCG_CT_CONST_CMPI;
412
        break;
413
    default:
414
        return -1;
415
    }
416
    ct_str++;
417
    *pct_str = ct_str;
418

  
50 419
    return 0;
51 420
}
52 421

  
422
/* Immediates to be used with logical AND.  This is an optimization only,
423
   since a full 64-bit immediate AND can always be performed with 4 sequential
424
   NI[LH][LH] instructions.  What we're looking for is immediates that we
425
   can load efficiently, and the immediate load plus the reg-reg AND is
426
   smaller than the sequential NI's.  */
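/* Returning 1 accepts the constant as an immediate operand of the 'A'
   constraint (see tcg_target_const_match); returning 0 rejects it so that
   TCG materializes the value in a register first.  */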
427

  
428
static int tcg_match_andi(int ct, tcg_target_ulong val)
429
{
430
    int i;
431

  
432
    if (facilities & FACILITY_EXT_IMM) {
433
        if (ct & TCG_CT_CONST_32) {
434
            /* All 32-bit ANDs can be performed with 1 48-bit insn.  */
435
            return 1;
436
        }
437

  
438
        /* Zero-extensions.  */
439
        if (val == 0xff || val == 0xffff || val == 0xffffffff) {
440
            return 1;
441
        }
442
    } else {
443
        if (ct & TCG_CT_CONST_32) {
444
            val = (uint32_t)val;
445
        } else if (val == 0xffffffff) {
446
            return 1;
447
        }
448
    }
449

  
450
    /* Try all 32-bit insns that can perform it in one go.  */
451
    for (i = 0; i < 4; i++) {
452
        tcg_target_ulong mask = ~(0xffffull << i*16);
453
        if ((val & mask) == mask) {
454
            return 1;
455
        }
456
    }
457

  
458
    /* Look for 16-bit values performing the mask.  These are better
459
       to load with LLI[LH][LH].  */
460
    for (i = 0; i < 4; i++) {
461
        tcg_target_ulong mask = 0xffffull << i*16;
462
        if ((val & mask) == val) {
463
            return 0;
464
        }
465
    }
466

  
467
    /* Look for 32-bit values performing the 64-bit mask.  These
468
       are better to load with LLI[LH]F, or if extended immediates
469
       not available, with a pair of LLI insns.  */
470
    if ((ct & TCG_CT_CONST_32) == 0) {
471
        if (val <= 0xffffffff || (val & 0xffffffff) == 0) {
472
            return 0;
473
        }
474
    }
475

  
476
    return 1;
477
}
478

  
479
/* Immediates to be used with logical OR.  This is an optimization only,
480
   since a full 64-bit immediate OR can always be performed with 4 sequential
481
   OI[LH][LH] instructions.  What we're looking for is immediates that we
482
   can load efficiently, and the immediate load plus the reg-reg OR is
483
   smaller than the sequential OI's.  */
484

  
485
static int tcg_match_ori(int ct, tcg_target_long val)
486
{
487
    if (facilities & FACILITY_EXT_IMM) {
488
        if (ct & TCG_CT_CONST_32) {
489
            /* All 32-bit ORs can be performed with 1 48-bit insn.  */
490
            return 1;
491
        }
492
    }
493

  
494
    /* Look for negative values.  These are best to load with LGHI.  */
495
    if (val < 0) {
496
        if (val == (int16_t)val) {
497
            return 0;
498
        }
499
        if (facilities & FACILITY_EXT_IMM) {
500
            if (val == (int32_t)val) {
501
                return 0;
502
            }
503
        }
504
    }
505

  
506
    return 1;
507
}
508

  
509
/* Immediates to be used with logical XOR.  This is almost, but not quite,
510
   only an optimization.  XOR with immediate is only supported with the
511
   extended-immediate facility.  That said, there are a few patterns for
512
   which it is better to load the value into a register first.  */
513

  
514
static int tcg_match_xori(int ct, tcg_target_long val)
515
{
516
    if ((facilities & FACILITY_EXT_IMM) == 0) {
517
        return 0;
518
    }
519

  
520
    if (ct & TCG_CT_CONST_32) {
521
        /* All 32-bit XORs can be performed with 1 48-bit insn.  */
522
        return 1;
523
    }
524

  
525
    /* Look for negative values.  These are best to load with LGHI.  */
526
    if (val < 0 && val == (int32_t)val) {
527
        return 0;
528
    }
529

  
530
    return 1;
531
}
532

  
533
/* Immediates to be used with comparisons.  */
534

  
535
static int tcg_match_cmpi(int ct, tcg_target_long val)
536
{
537
    if (facilities & FACILITY_EXT_IMM) {
538
        /* The COMPARE IMMEDIATE instruction is available.  */
539
        if (ct & TCG_CT_CONST_32) {
540
            /* We have a 32-bit immediate and can compare against anything.  */
541
            return 1;
542
        } else {
543
            /* ??? We have no insight here into whether the comparison is
544
               signed or unsigned.  The COMPARE IMMEDIATE insn uses a 32-bit
545
               signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
546
               a 32-bit unsigned immediate.  If we were to use the (semi)
547
               obvious "val == (int32_t)val" we would be enabling unsigned
548
               comparisons vs very large numbers.  The only solution is to
549
               take the intersection of the ranges.  */
550
            /* ??? Another possible solution is to simply lie and allow all
551
               constants here and force the out-of-range values into a temp
552
               register in tgen_cmp when we have knowledge of the actual
553
               comparison code in use.  */
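            /* E.g. 0x80000000 fits COMPARE LOGICAL IMMEDIATE but not
               COMPARE IMMEDIATE, and -1 fits the signed form but not the
               unsigned one; both are rejected by the intersection below.  */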
554
            return val >= 0 && val <= 0x7fffffff;
555
        }
556
    } else {
557
        /* Only the LOAD AND TEST instruction is available.  */
558
        return val == 0;
559
    }
560
}
561

  
53 562
/* Test if a constant matches the constraint. */
54
static inline int tcg_target_const_match(tcg_target_long val,
55
                const TCGArgConstraint *arg_ct)
563
static int tcg_target_const_match(tcg_target_long val,
564
                                  const TCGArgConstraint *arg_ct)
56 565
{
57
    tcg_abort();
566
    int ct = arg_ct->ct;
567

  
568
    if (ct & TCG_CT_CONST) {
569
        return 1;
570
    }
571

  
572
    /* Handle the modifiers.  */
573
    if (ct & TCG_CT_CONST_NEG) {
574
        val = -val;
575
    }
576
    if (ct & TCG_CT_CONST_32) {
577
        val = (int32_t)val;
578
    }
579

  
580
    /* The following are mutually exclusive.  */
581
    if (ct & TCG_CT_CONST_ADDI) {
582
        /* Immediates that may be used with add.  If we have the
583
           extended-immediates facility then we have ADD IMMEDIATE
584
           with signed and unsigned 32-bit, otherwise we have only
585
           ADD HALFWORD IMMEDIATE with a signed 16-bit.  */
586
        if (facilities & FACILITY_EXT_IMM) {
587
            return val == (int32_t)val || val == (uint32_t)val;
588
        } else {
589
            return val == (int16_t)val;
590
        }
591
    } else if (ct & TCG_CT_CONST_MULI) {
592
        /* Immediates that may be used with multiply.  If we have the
593
           general-instruction-extensions, then we have MULTIPLY SINGLE
594
           IMMEDIATE with a signed 32-bit, otherwise we have only
595
           MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit.  */
596
        if (facilities & FACILITY_GEN_INST_EXT) {
597
            return val == (int32_t)val;
598
        } else {
599
            return val == (int16_t)val;
600
        }
601
    } else if (ct & TCG_CT_CONST_ANDI) {
602
        return tcg_match_andi(ct, val);
603
    } else if (ct & TCG_CT_CONST_ORI) {
604
        return tcg_match_ori(ct, val);
605
    } else if (ct & TCG_CT_CONST_XORI) {
606
        return tcg_match_xori(ct, val);
607
    } else if (ct & TCG_CT_CONST_CMPI) {
608
        return tcg_match_cmpi(ct, val);
609
    }
610

  
58 611
    return 0;
59 612
}
60 613

  
614
/* Emit instructions according to the given instruction format.  */
615

  
616
static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
617
{
618
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
619
}
620

  
621
static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
622
                             TCGReg r1, TCGReg r2)
623
{
624
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
625
}
626

  
627
static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
628
{
629
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
630
}
631

  
632
static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
633
{
634
    tcg_out16(s, op | (r1 << 4));
635
    tcg_out32(s, i2);
636
}
637

  
638
static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
639
                            TCGReg b2, TCGReg r3, int disp)
640
{
641
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
642
              | (disp & 0xfff));
643
}
644

  
645
static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
646
                             TCGReg b2, TCGReg r3, int disp)
647
{
648
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
649
    tcg_out32(s, (op & 0xff) | (b2 << 28)
650
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
651
}
652

  
653
#define tcg_out_insn_RX   tcg_out_insn_RS
654
#define tcg_out_insn_RXY  tcg_out_insn_RSY
655

  
656
/* Emit an opcode with "type-checking" of the format.  */
657
#define tcg_out_insn(S, FMT, OP, ...) \
658
    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
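/* E.g. tcg_out_insn(s, RI, LGHI, ret, val) expands to
   tcg_out_insn_RI(s, RI_LGHI, ret, val).  */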
659

  
660

  
661
/* emit 64-bit shifts */
662
static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
663
                         TCGReg src, TCGReg sh_reg, int sh_imm)
664
{
665
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
666
}
667

  
668
/* emit 32-bit shifts */
669
static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
670
                         TCGReg sh_reg, int sh_imm)
671
{
672
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
673
}
674

  
675
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
676
{
677
    if (src != dst) {
678
        if (type == TCG_TYPE_I32) {
679
            tcg_out_insn(s, RR, LR, dst, src);
680
        } else {
681
            tcg_out_insn(s, RRE, LGR, dst, src);
682
        }
683
    }
684
}
685

  
61 686
/* load a register with an immediate value */
62
static inline void tcg_out_movi(TCGContext *s, TCGType type,
63
                int ret, tcg_target_long arg)
687
static void tcg_out_movi(TCGContext *s, TCGType type,
688
                         TCGReg ret, tcg_target_long sval)
64 689
{
65
    tcg_abort();
690
    static const S390Opcode lli_insns[4] = {
691
        RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
692
    };
693

  
694
    tcg_target_ulong uval = sval;
695
    int i;
696

  
697
    if (type == TCG_TYPE_I32) {
698
        uval = (uint32_t)sval;
699
        sval = (int32_t)sval;
700
    }
701

  
702
    /* Try all 32-bit insns that can load it in one go.  */
703
    if (sval >= -0x8000 && sval < 0x8000) {
704
        tcg_out_insn(s, RI, LGHI, ret, sval);
705
        return;
706
    }
707

  
708
    for (i = 0; i < 4; i++) {
709
        tcg_target_long mask = 0xffffull << i*16;
710
        if ((uval & mask) == uval) {
711
            tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
712
            return;
713
        }
714
    }
715

  
716
    /* Try all 48-bit insns that can load it in one go.  */
717
    if (facilities & FACILITY_EXT_IMM) {
718
        if (sval == (int32_t)sval) {
719
            tcg_out_insn(s, RIL, LGFI, ret, sval);
720
            return;
721
        }
722
        if (uval <= 0xffffffff) {
723
            tcg_out_insn(s, RIL, LLILF, ret, uval);
724
            return;
725
        }
726
        if ((uval & 0xffffffff) == 0) {
727
            tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
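            /* ">> 31 >> 1" is ">> 32" written so that each shift count stays
               below the operand width; presumably this keeps the expression
               well-defined even for a 32-bit tcg_target_long.  */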
728
            return;
729
        }
730
    }
731

  
732
    /* Try for PC-relative address load.  */
733
    if ((sval & 1) == 0) {
734
        intptr_t off = (sval - (intptr_t)s->code_ptr) >> 1;
735
        if (off == (int32_t)off) {
736
            tcg_out_insn(s, RIL, LARL, ret, off);
737
            return;
738
        }
739
    }
740

  
741
    /* If extended immediates are not present, then we may have to issue
742
       several instructions to load the low 32 bits.  */
743
    if (!(facilities & FACILITY_EXT_IMM)) {
744
        /* A 32-bit unsigned value can be loaded in 2 insns.  And given
745
           that the lli_insns loop above did not succeed, we know that
746
           both insns are required.  */
747
        if (uval <= 0xffffffff) {
748
            tcg_out_insn(s, RI, LLILL, ret, uval);
749
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
750
            return;
751
        }
752

  
753
        /* If all high bits are set, the value can be loaded in 2 or 3 insns.
754
           We first want to make sure that all the high bits get set.  With
755
           luck the low 16-bits can be considered negative to perform that for
756
           free, otherwise we load an explicit -1.  */
757
        if (sval >> 31 >> 1 == -1) {
758
            if (uval & 0x8000) {
759
                tcg_out_insn(s, RI, LGHI, ret, uval);
760
            } else {
761
                tcg_out_insn(s, RI, LGHI, ret, -1);
762
                tcg_out_insn(s, RI, IILL, ret, uval);
763
            }
764
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
765
            return;
766
        }
767
    }
768

  
769
    /* If we get here, both the high and low parts have non-zero bits.  */
770

  
771
    /* Recurse to load the lower 32-bits.  */
772
    tcg_out_movi(s, TCG_TYPE_I32, ret, sval);
773

  
774
    /* Insert data into the high 32-bits.  */
775
    uval = uval >> 31 >> 1;
776
    if (facilities & FACILITY_EXT_IMM) {
777
        if (uval < 0x10000) {
778
            tcg_out_insn(s, RI, IIHL, ret, uval);
779
        } else if ((uval & 0xffff) == 0) {
780
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
781
        } else {
782
            tcg_out_insn(s, RIL, IIHF, ret, uval);
783
        }
784
    } else {
785
        if (uval & 0xffff) {
786
            tcg_out_insn(s, RI, IIHL, ret, uval);
787
        }
788
        if (uval & 0xffff0000) {
789
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
790
        }
791
    }
792
}
793

  
794

  
795
/* Emit a load/store type instruction.  Inputs are:
796
   DATA:     The register to be loaded or stored.
797
   BASE+OFS: The effective address.
798
   OPC_RX:   If the operation has an RX format opcode (e.g. STC), otherwise 0.
799
   OPC_RXY:  The RXY format opcode for the operation (e.g. STCY).  */
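/* Worked example (assuming a large offset such as 0x123456): the RXY forms
   take only a signed 20-bit displacement, so 0x120000 is loaded into
   TCG_TMP0 and used as the index register while 0x3456 stays in the
   displacement field.  */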
800

  
801
static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
802
                        TCGReg data, TCGReg base, TCGReg index,
803
                        tcg_target_long ofs)
804
{
805
    if (ofs < -0x80000 || ofs >= 0x80000) {
806
        /* Combine the low 16 bits of the offset with the actual load insn;
807
           the high 48 bits must come from an immediate load.  */
808
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs & ~0xffff);
809
        ofs &= 0xffff;
810

  
811
        /* If we were already given an index register, add it in.  */
812
        if (index != TCG_REG_NONE) {
813
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
814
        }
815
        index = TCG_TMP0;
816
    }
817

  
818
    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
819
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
820
    } else {
821
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
822
    }
66 823
}
67 824

  
825

  
68 826
/* load data without address translation or endianness conversion */
69
static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
70
                int arg1, tcg_target_long arg2)
827
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
828
                              TCGReg base, tcg_target_long ofs)
71 829
{
72
    tcg_abort();
830
    if (type == TCG_TYPE_I32) {
831
        tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
832
    } else {
833
        tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
834
    }
73 835
}
74 836

  
75
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
76
                              int arg1, tcg_target_long arg2)
837
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
838
                              TCGReg base, tcg_target_long ofs)
77 839
{
78
    tcg_abort();
840
    if (type == TCG_TYPE_I32) {
841
        tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
842
    } else {
843
        tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
844
    }
845
}
846

  
847
/* load data from an absolute host address */
848
static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
849
{
850
    tcg_target_long addr = (tcg_target_long)abs;
851

  
852
    if (facilities & FACILITY_GEN_INST_EXT) {
853
        tcg_target_long disp = (addr - (tcg_target_long)s->code_ptr) >> 1;
854
        if (disp == (int32_t)disp) {
855
            if (type == TCG_TYPE_I32) {
856
                tcg_out_insn(s, RIL, LRL, dest, disp);
857
            } else {
858
                tcg_out_insn(s, RIL, LGRL, dest, disp);
859
            }
860
            return;
861
        }
862
    }
863

  
864
    tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
865
    tcg_out_ld(s, type, dest, dest, addr & 0xffff);
866
}
867

  
868
static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
869
{
870
    if (facilities & FACILITY_EXT_IMM) {
871
        tcg_out_insn(s, RRE, LGBR, dest, src);
872
        return;
873
    }
874

  
875
    if (type == TCG_TYPE_I32) {
876
        if (dest == src) {
877
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
878
        } else {
879
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
880
        }
881
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
882
    } else {
883
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
884
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
885
    }
886
}
887

  
888
static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
889
{
890
    if (facilities & FACILITY_EXT_IMM) {
891
        tcg_out_insn(s, RRE, LLGCR, dest, src);
892
        return;
893
    }
894

  
895
    if (dest == src) {
896
        tcg_out_movi(s, type, TCG_TMP0, 0xff);
897
        src = TCG_TMP0;
898
    } else {
899
        tcg_out_movi(s, type, dest, 0xff);
900
    }
901
    if (type == TCG_TYPE_I32) {
902
        tcg_out_insn(s, RR, NR, dest, src);
903
    } else {
904
        tcg_out_insn(s, RRE, NGR, dest, src);
905
    }
906
}
907

  
908
static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
909
{
910
    if (facilities & FACILITY_EXT_IMM) {
911
        tcg_out_insn(s, RRE, LGHR, dest, src);
912
        return;
913
    }
914

  
915
    if (type == TCG_TYPE_I32) {
916
        if (dest == src) {
917
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
918
        } else {
919
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
920
        }
921
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
922
    } else {
923
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
924
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
925
    }
926
}
927

  
928
static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
929
{
930
    if (facilities & FACILITY_EXT_IMM) {
931
        tcg_out_insn(s, RRE, LLGHR, dest, src);
932
        return;
933
    }
934

  
935
    if (dest == src) {
936
        tcg_out_movi(s, type, TCG_TMP0, 0xffff);
937
        src = TCG_TMP0;
938
    } else {
939
        tcg_out_movi(s, type, dest, 0xffff);
940
    }
941
    if (type == TCG_TYPE_I32) {
942
        tcg_out_insn(s, RR, NR, dest, src);
943
    } else {
944
        tcg_out_insn(s, RRE, NGR, dest, src);
945
    }
946
}
947

  
948
static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
949
{
950
    tcg_out_insn(s, RRE, LGFR, dest, src);
951
}
952

  
953
static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
954
{
955
    tcg_out_insn(s, RRE, LLGFR, dest, src);
956
}
957

  
958
static inline void tgen32_addi(TCGContext *s, TCGReg dest, int32_t val)
959
{
960
    if (val == (int16_t)val) {
961
        tcg_out_insn(s, RI, AHI, dest, val);
962
    } else {
963
        tcg_out_insn(s, RIL, AFI, dest, val);
964
    }
965
}
966

  
967
static inline void tgen64_addi(TCGContext *s, TCGReg dest, int64_t val)
968
{
969
    if (val == (int16_t)val) {
970
        tcg_out_insn(s, RI, AGHI, dest, val);
971
    } else if (val == (int32_t)val) {
972
        tcg_out_insn(s, RIL, AGFI, dest, val);
973
    } else if (val == (uint32_t)val) {
974
        tcg_out_insn(s, RIL, ALGFI, dest, val);
975
    } else {
976
        tcg_abort();
977
    }
978

  
979
}
980

  
981
static void tgen64_andi(TCGContext *s, TCGReg dest, tcg_target_ulong val)
982
{
983
    static const S390Opcode ni_insns[4] = {
984
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
985
    };
986
    static const S390Opcode nif_insns[2] = {
987
        RIL_NILF, RIL_NIHF
988
    };
989

  
990
    int i;
991

  
992
    /* Look for no-op.  */
993
    if (val == -1) {
994
        return;
995
    }
996

  
997
    /* Look for the zero-extensions.  */
998
    if (val == 0xffffffff) {
999
        tgen_ext32u(s, dest, dest);
1000
        return;
1001
    }
1002

  
1003
    if (facilities & FACILITY_EXT_IMM) {
1004
        if (val == 0xff) {
1005
            tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
1006
            return;
1007
        }
1008
        if (val == 0xffff) {
1009
            tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
1010
            return;
1011
        }
1012

  
1013
        /* Try all 32-bit insns that can perform it in one go.  */
1014
        for (i = 0; i < 4; i++) {
1015
            tcg_target_ulong mask = ~(0xffffull << i*16);
1016
            if ((val & mask) == mask) {
1017
                tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
1018
                return;
1019
            }
1020
        }
1021

  
1022
        /* Try all 48-bit insns that can perform it in one go.  */
1023
        if (facilities & FACILITY_EXT_IMM) {
1024
            for (i = 0; i < 2; i++) {
1025
                tcg_target_ulong mask = ~(0xffffffffull << i*32);
1026
                if ((val & mask) == mask) {
1027
                    tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
1028
                    return;
1029
                }
1030
            }
1031
        }
1032

  
1033
        /* Perform the AND via sequential modifications to the high and low
1034
           parts.  Do this via recursion to handle 16-bit vs 32-bit masks in
1035
           each half.  */
1036
        tgen64_andi(s, dest, val | 0xffffffff00000000ull);
1037
        tgen64_andi(s, dest, val | 0x00000000ffffffffull);
1038
    } else {
1039
        /* With no extended-immediate facility, just emit the sequence.  */
1040
        for (i = 0; i < 4; i++) {
1041
            tcg_target_ulong mask = 0xffffull << i*16;
1042
            if ((val & mask) != mask) {
1043
                tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
1044
            }
1045
        }
1046
    }
1047
}
1048

  
1049
static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1050
{
1051
    static const S390Opcode oi_insns[4] = {
1052
        RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
1053
    };
1054
    static const S390Opcode nif_insns[2] = {
1055
        RIL_OILF, RIL_OIHF
1056
    };
1057

  
1058
    int i;
1059

  
1060
    /* Look for no-op.  */
1061
    if (val == 0) {
1062
        return;
1063
    }
1064

  
1065
    if (facilities & FACILITY_EXT_IMM) {
1066
        /* Try all 32-bit insns that can perform it in one go.  */
1067
        for (i = 0; i < 4; i++) {
1068
            tcg_target_ulong mask = (0xffffull << i*16);
1069
            if ((val & mask) != 0 && (val & ~mask) == 0) {
1070
                tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1071
                return;
1072
            }
1073
        }
1074

  
1075
        /* Try all 48-bit insns that can perform it in one go.  */
1076
        for (i = 0; i < 2; i++) {
1077
            tcg_target_ulong mask = (0xffffffffull << i*32);
1078
            if ((val & mask) != 0 && (val & ~mask) == 0) {
1079
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
1080
                return;
1081
            }
1082
        }
1083

  
1084
        /* Perform the OR via sequential modifications to the high and
1085
           low parts.  Do this via recursion to handle 16-bit vs 32-bit
1086
           masks in each half.  */
1087
        tgen64_ori(s, dest, val & 0x00000000ffffffffull);
1088
        tgen64_ori(s, dest, val & 0xffffffff00000000ull);
1089
    } else {
1090
        /* With no extended-immediate facility, we don't need to be so
1091
           clever.  Just iterate over the insns and mask in the constant.  */
1092
        for (i = 0; i < 4; i++) {
1093
            tcg_target_ulong mask = (0xffffull << i*16);
1094
            if ((val & mask) != 0) {
1095
                tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1096
            }
1097
        }
1098
    }
1099
}
1100

  
1101
static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1102
{
1103
    /* Perform the xor by parts.  */
1104
    if (val & 0xffffffff) {
1105
        tcg_out_insn(s, RIL, XILF, dest, val);
1106
    }
1107
    if (val > 0xffffffff) {
1108
        tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
1109
    }
1110
}
1111

  
1112
static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
1113
                    TCGArg c2, int c2const)
1114
{
1115
    bool is_unsigned = (c > TCG_COND_GT);
1116
    if (c2const) {
1117
        if (c2 == 0) {
1118
            if (type == TCG_TYPE_I32) {
1119
                tcg_out_insn(s, RR, LTR, r1, r1);
1120
            } else {
1121
                tcg_out_insn(s, RRE, LTGR, r1, r1);
1122
            }
1123
            return tcg_cond_to_ltr_cond[c];
1124
        } else {
1125
            if (is_unsigned) {
1126
                if (type == TCG_TYPE_I32) {
1127
                    tcg_out_insn(s, RIL, CLFI, r1, c2);
1128
                } else {
1129
                    tcg_out_insn(s, RIL, CLGFI, r1, c2);
1130
                }
1131
            } else {
1132
                if (type == TCG_TYPE_I32) {
1133
                    tcg_out_insn(s, RIL, CFI, r1, c2);
1134
                } else {
1135
                    tcg_out_insn(s, RIL, CGFI, r1, c2);
1136
                }
1137
            }
1138
        }
1139
    } else {
1140
        if (is_unsigned) {
1141
            if (type == TCG_TYPE_I32) {
1142
                tcg_out_insn(s, RR, CLR, r1, c2);
1143
            } else {
1144
                tcg_out_insn(s, RRE, CLGR, r1, c2);
1145
            }
1146
        } else {
1147
            if (type == TCG_TYPE_I32) {
1148
                tcg_out_insn(s, RR, CR, r1, c2);
1149
            } else {
1150
                tcg_out_insn(s, RRE, CGR, r1, c2);
1151
            }
1152
        }
1153
    }
1154
    return tcg_cond_to_s390_cond[c];
1155
}
1156

  
1157
static void tgen_setcond(TCGContext *s, TCGType type, TCGCond c,
1158
                         TCGReg dest, TCGReg r1, TCGArg c2, int c2const)
1159
{
1160
    int cc = tgen_cmp(s, type, c, r1, c2, c2const);
1161

  
1162
    /* Emit: r1 = 1; if (cc) goto over; r1 = 0; over:  */
1163
    tcg_out_movi(s, type, dest, 1);
1164
    tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
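    /* The RI-format BRC above and the LGHI emitted by tcg_out_movi are
       4 bytes each, and branch offsets are counted in halfwords, so
       (4 + 4) >> 1 skips the following move.  */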
1165
    tcg_out_movi(s, type, dest, 0);
1166
}
1167

  
1168
static void tgen_gotoi(TCGContext *s, int cc, tcg_target_long dest)
1169
{
1170
    tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
1171
    if (off > -0x8000 && off < 0x7fff) {
1172
        tcg_out_insn(s, RI, BRC, cc, off);
1173
    } else if (off == (int32_t)off) {
1174
        tcg_out_insn(s, RIL, BRCL, cc, off);
1175
    } else {
1176
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
1177
        tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1178
    }
1179
}
1180

  
1181
static void tgen_branch(TCGContext *s, int cc, int labelno)
1182
{
1183
    TCGLabel* l = &s->labels[labelno];
1184
    if (l->has_value) {
1185
        tgen_gotoi(s, cc, l->u.value);
1186
    } else if (USE_LONG_BRANCHES) {
1187
        tcg_out16(s, RIL_BRCL | (cc << 4));
1188
        tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, labelno, -2);
1189
        s->code_ptr += 4;
1190
    } else {
1191
        tcg_out16(s, RI_BRC | (cc << 4));
1192
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, labelno, -2);
1193
        s->code_ptr += 2;
1194
    }
1195
}
1196

  
1197
static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
1198
                                TCGReg r1, TCGReg r2, int labelno)
1199
{
1200
    TCGLabel* l = &s->labels[labelno];
1201
    tcg_target_long off;
1202

  
1203
    if (l->has_value) {
1204
        off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
1205
    } else {
1206
        /* We need to keep the offset unchanged for retranslation.  */
1207
        off = ((int16_t *)s->code_ptr)[1];
1208
        tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
1209
    }
1210

  
1211
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
1212
    tcg_out16(s, off);
1213
    tcg_out16(s, cc << 12 | (opc & 0xff));
1214
}
1215

  
1216
static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
1217
                                    TCGReg r1, int i2, int labelno)
1218
{
1219
    TCGLabel* l = &s->labels[labelno];
1220
    tcg_target_long off;
1221

  
1222
    if (l->has_value) {
1223
        off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
1224
    } else {
1225
        /* We need to keep the offset unchanged for retranslation.  */
1226
        off = ((int16_t *)s->code_ptr)[1];
1227
        tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
1228
    }
1229

  
1230
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
1231
    tcg_out16(s, off);
1232
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
1233
}
1234

  
1235
static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
1236
                        TCGReg r1, TCGArg c2, int c2const, int labelno)
1237
{
1238
    int cc;
1239

  
1240
    if (facilities & FACILITY_GEN_INST_EXT) {
1241
        bool is_unsigned = (c > TCG_COND_GT);
1242
        bool in_range;
1243
        S390Opcode opc;
1244

  
1245
        cc = tcg_cond_to_s390_cond[c];
1246

  
1247
        if (!c2const) {
1248
            opc = (type == TCG_TYPE_I32
1249
                   ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
1250
                   : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
1251
            tgen_compare_branch(s, opc, cc, r1, c2, labelno);
1252
            return;
1253
        }
1254

  
1255
        /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1256
           If the immediate we've been given does not fit that range, we'll
1257
           fall back to separate compare and branch instructions using the
1258
           larger comparison range afforded by COMPARE IMMEDIATE.  */
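        /* E.g. an unsigned 32-bit comparison against 0x100 does not fit the
           8-bit field ((uint32_t)0x100 != (uint8_t)0x100), so we fall
           through to tgen_cmp plus tgen_branch at the end of the function.  */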
1259
        if (type == TCG_TYPE_I32) {
1260
            if (is_unsigned) {
1261
                opc = RIE_CLIJ;
1262
                in_range = (uint32_t)c2 == (uint8_t)c2;
1263
            } else {
1264
                opc = RIE_CIJ;
1265
                in_range = (int32_t)c2 == (int8_t)c2;
1266
            }
1267
        } else {
1268
            if (is_unsigned) {
1269
                opc = RIE_CLGIJ;
1270
                in_range = (uint64_t)c2 == (uint8_t)c2;
1271
            } else {
1272
                opc = RIE_CGIJ;
1273
                in_range = (int64_t)c2 == (int8_t)c2;
1274
            }
1275
        }
1276
        if (in_range) {
1277
            tgen_compare_imm_branch(s, opc, cc, r1, c2, labelno);
1278
            return;
1279
        }
1280
    }
1281

  
1282
    cc = tgen_cmp(s, type, c, r1, c2, c2const);
1283
    tgen_branch(s, cc, labelno);
1284
}
1285

  
1286
static void tgen_calli(TCGContext *s, tcg_target_long dest)
1287
{
1288
    tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
1289
    if (off == (int32_t)off) {
1290
        tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1291
    } else {
1292
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
1293
        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1294
    }
1295
}
1296

  
1297
static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data,
1298
                                   TCGReg base, TCGReg index, int disp)
1299
{
1300
#ifdef TARGET_WORDS_BIGENDIAN
1301
    const int bswap = 0;
1302
#else
1303
    const int bswap = 1;
1304
#endif
1305
    switch (opc) {
1306
    case LD_UINT8:
1307
        tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
1308
        break;
1309
    case LD_INT8:
1310
        tcg_out_insn(s, RXY, LGB, data, base, index, disp);
1311
        break;
1312
    case LD_UINT16:
1313
        if (bswap) {
1314
            /* swapped unsigned halfword load with upper bits zeroed */
1315
            tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1316
            tgen_ext16u(s, TCG_TYPE_I64, data, data);
1317
        } else {
1318
            tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
1319
        }
1320
        break;
1321
    case LD_INT16:
1322
        if (bswap) {
1323
            /* swapped sign-extended halfword load */
1324
            tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1325
            tgen_ext16s(s, TCG_TYPE_I64, data, data);
1326
        } else {
1327
            tcg_out_insn(s, RXY, LGH, data, base, index, disp);
1328
        }
1329
        break;
1330
    case LD_UINT32:
1331
        if (bswap) {
1332
            /* swapped unsigned int load with upper bits zeroed */
1333
            tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1334
            tgen_ext32u(s, data, data);
1335
        } else {
1336
            tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
1337
        }
1338
        break;
1339
    case LD_INT32:
1340
        if (bswap) {
1341
            /* swapped sign-extended int load */
1342
            tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1343
            tgen_ext32s(s, data, data);
1344
        } else {
1345
            tcg_out_insn(s, RXY, LGF, data, base, index, disp);
1346
        }
1347
        break;
1348
    case LD_UINT64:
1349
        if (bswap) {
1350
            tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
1351
        } else {
1352
            tcg_out_insn(s, RXY, LG, data, base, index, disp);
1353
        }
1354
        break;
1355
    default:
1356
        tcg_abort();
1357
    }
1358
}
1359

  
1360
static void tcg_out_qemu_st_direct(TCGContext *s, int opc, TCGReg data,
1361
                                   TCGReg base, TCGReg index, int disp)
1362
{
1363
#ifdef TARGET_WORDS_BIGENDIAN
1364
    const int bswap = 0;
1365
#else
1366
    const int bswap = 1;
1367
#endif
1368
    switch (opc) {
1369
    case LD_UINT8:
1370
        if (disp >= 0 && disp < 0x1000) {
1371
            tcg_out_insn(s, RX, STC, data, base, index, disp);
1372
        } else {
1373
            tcg_out_insn(s, RXY, STCY, data, base, index, disp);
1374
        }
1375
        break;
1376
    case LD_UINT16:
1377
        if (bswap) {
1378
            tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
1379
        } else if (disp >= 0 && disp < 0x1000) {
1380
            tcg_out_insn(s, RX, STH, data, base, index, disp);
1381
        } else {
1382
            tcg_out_insn(s, RXY, STHY, data, base, index, disp);
1383
        }
1384
        break;
1385
    case LD_UINT32:
1386
        if (bswap) {
1387
            tcg_out_insn(s, RXY, STRV, data, base, index, disp);
1388
        } else if (disp >= 0 && disp < 0x1000) {
1389
            tcg_out_insn(s, RX, ST, data, base, index, disp);
1390
        } else {
1391
            tcg_out_insn(s, RXY, STY, data, base, index, disp);
1392
        }
1393
        break;
1394
    case LD_UINT64:
1395
        if (bswap) {
1396
            tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
1397
        } else {
1398
            tcg_out_insn(s, RXY, STG, data, base, index, disp);
1399
        }
1400
        break;
1401
    default:
1402
        tcg_abort();
1403
    }
1404
}
1405

  
1406
#if defined(CONFIG_SOFTMMU)
1407
static void tgen64_andi_tmp(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1408
{
1409
    if (tcg_match_andi(0, val)) {
1410
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, val);
1411
        tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
1412
    } else {
1413
        tgen64_andi(s, dest, val);
1414
    }
1415
}
1416

  
1417
static void tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
1418
                                  TCGReg addr_reg, int mem_index, int opc,
1419
                                  uint16_t **label2_ptr_p, int is_store)
1420
{
1421
    const TCGReg arg0 = TCG_REG_R2;
1422
    const TCGReg arg1 = TCG_REG_R3;
1423
    int s_bits = opc & 3;
1424
    uint16_t *label1_ptr;
1425
    tcg_target_long ofs;
1426

  
1427
    if (TARGET_LONG_BITS == 32) {
1428
        tgen_ext32u(s, arg0, addr_reg);
1429
    } else {
1430
        tcg_out_mov(s, TCG_TYPE_I64, arg0, addr_reg);
1431
    }
1432

  
1433
    tcg_out_sh64(s, RSY_SRLG, arg1, addr_reg, TCG_REG_NONE,
1434
                 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1435

  
1436
    tgen64_andi_tmp(s, arg0, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
1437
    tgen64_andi_tmp(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
1438

  
1439
    if (is_store) {
1440
        ofs = offsetof(CPUState, tlb_table[mem_index][0].addr_write);
1441
    } else {
1442
        ofs = offsetof(CPUState, tlb_table[mem_index][0].addr_read);
1443
    }
1444
    assert(ofs < 0x80000);
1445

  
1446
    if (TARGET_LONG_BITS == 32) {
1447
        tcg_out_mem(s, RX_C, RXY_CY, arg0, arg1, TCG_AREG0, ofs);
1448
    } else {
1449
        tcg_out_mem(s, 0, RXY_CG, arg0, arg1, TCG_AREG0, ofs);
1450
    }
1451

  
1452
    if (TARGET_LONG_BITS == 32) {
1453
        tgen_ext32u(s, arg0, addr_reg);
1454
    } else {
1455
        tcg_out_mov(s, TCG_TYPE_I64, arg0, addr_reg);
1456
    }
1457

  
1458
    label1_ptr = (uint16_t*)s->code_ptr;
1459

  
1460
    /* je label1 (offset will be patched in later) */
1461
    tcg_out_insn(s, RI, BRC, S390_CC_EQ, 0);
1462

  
1463
    /* call load/store helper */
1464
    if (is_store) {
1465
        /* Make sure to zero-extend the value to the full register
1466
           for the calling convention.  */
1467
        switch (opc) {
1468
        case LD_UINT8:
1469
            tgen_ext8u(s, TCG_TYPE_I64, arg1, data_reg);
1470
            break;
1471
        case LD_UINT16:
1472
            tgen_ext16u(s, TCG_TYPE_I64, arg1, data_reg);
1473
            break;
1474
        case LD_UINT32:
1475
            tgen_ext32u(s, arg1, data_reg);
1476
            break;
1477
        case LD_UINT64:
1478
            tcg_out_mov(s, TCG_TYPE_I64, arg1, data_reg);
... This diff was truncated because it exceeds the maximum size that can be displayed.
