1
/*
2
 *  S/390 translation
3
 *
4
 *  Copyright (c) 2009 Ulrich Hecht
5
 *  Copyright (c) 2010 Alexander Graf
6
 *
7
 * This library is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2 of the License, or (at your option) any later version.
11
 *
12
 * This library is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19
 */
20

    
21
/* #define DEBUG_INLINE_BRANCHES */
22
#define S390X_DEBUG_DISAS
23
/* #define S390X_DEBUG_DISAS_VERBOSE */
24

    
25
#ifdef S390X_DEBUG_DISAS_VERBOSE
26
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27
#else
28
#  define LOG_DISAS(...) do { } while (0)
29
#endif
30

    
31
#include "cpu.h"
32
#include "disas/disas.h"
33
#include "tcg-op.h"
34
#include "qemu/log.h"
35
#include "qemu/host-utils.h"
36

    
37
/* global register indexes */
38
static TCGv_ptr cpu_env;
39

    
40
#include "exec/gen-icount.h"
41
#include "helper.h"
42
#define GEN_HELPER 1
43
#include "helper.h"
44

    
45

    
46
/* Information that (most) every instruction needs to manipulate.  */
47
typedef struct DisasContext DisasContext;
48
typedef struct DisasInsn DisasInsn;
49
typedef struct DisasFields DisasFields;
50

    
51
struct DisasContext {
52
    struct TranslationBlock *tb;
53
    const DisasInsn *insn;
54
    DisasFields *fields;
55
    uint64_t pc, next_pc;
56
    enum cc_op cc_op;
57
    bool singlestep_enabled;
58
    int is_jmp;
59
};
60

    
61
/* Information carried about a condition to be evaluated.  */
62
typedef struct {
63
    TCGCond cond:8;
64
    bool is_64;
65
    bool g1;
66
    bool g2;
67
    union {
68
        struct { TCGv_i64 a, b; } s64;
69
        struct { TCGv_i32 a, b; } s32;
70
    } u;
71
} DisasCompare;
72

    
73
#define DISAS_EXCP 4
74

    
75
static void gen_op_calc_cc(DisasContext *s);
76

    
77
#ifdef DEBUG_INLINE_BRANCHES
78
static uint64_t inline_branch_hit[CC_OP_MAX];
79
static uint64_t inline_branch_miss[CC_OP_MAX];
80
#endif
81

    
82
static inline void debug_insn(uint64_t insn)
83
{
84
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
85
}
86

    
87
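/* Compute the value a branch-and-link instruction saves as the return
   address: in 31-bit mode the high bit of the address is set.  */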
static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88
{
89
    if (!(s->tb->flags & FLAG_MASK_64)) {
90
        if (s->tb->flags & FLAG_MASK_32) {
91
            return pc | 0x80000000;
92
        }
93
    }
94
    return pc;
95
}
96

    
97
void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
98
                    int flags)
99
{
100
    int i;
101

    
102
    if (env->cc_op > 3) {
103
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
104
                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
105
    } else {
106
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
107
                    env->psw.mask, env->psw.addr, env->cc_op);
108
    }
109

    
110
    for (i = 0; i < 16; i++) {
111
        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
112
        if ((i % 4) == 3) {
113
            cpu_fprintf(f, "\n");
114
        } else {
115
            cpu_fprintf(f, " ");
116
        }
117
    }
118

    
119
    for (i = 0; i < 16; i++) {
120
        cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
121
        if ((i % 4) == 3) {
122
            cpu_fprintf(f, "\n");
123
        } else {
124
            cpu_fprintf(f, " ");
125
        }
126
    }
127

    
128
#ifndef CONFIG_USER_ONLY
129
    for (i = 0; i < 16; i++) {
130
        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
131
        if ((i % 4) == 3) {
132
            cpu_fprintf(f, "\n");
133
        } else {
134
            cpu_fprintf(f, " ");
135
        }
136
    }
137
#endif
138

    
139
#ifdef DEBUG_INLINE_BRANCHES
140
    for (i = 0; i < CC_OP_MAX; i++) {
141
        cpu_fprintf(f, "  %15s = %10ld\t%10ld\n", cc_name(i),
142
                    inline_branch_miss[i], inline_branch_hit[i]);
143
    }
144
#endif
145

    
146
    cpu_fprintf(f, "\n");
147
}
148

    
149
static TCGv_i64 psw_addr;
150
static TCGv_i64 psw_mask;
151

    
152
static TCGv_i32 cc_op;
153
static TCGv_i64 cc_src;
154
static TCGv_i64 cc_dst;
155
static TCGv_i64 cc_vr;
156

    
157
static char cpu_reg_names[32][4];
158
static TCGv_i64 regs[16];
159
static TCGv_i64 fregs[16];
160

    
161
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
162

    
163
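/* Allocate the TCG globals that mirror the architectural state (PSW,
   condition-code inputs, general and floating-point registers) and
   register the helper functions.  */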
void s390x_translate_init(void)
164
{
165
    int i;
166

    
167
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
168
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
169
                                      offsetof(CPUS390XState, psw.addr),
170
                                      "psw_addr");
171
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
172
                                      offsetof(CPUS390XState, psw.mask),
173
                                      "psw_mask");
174

    
175
    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
176
                                   "cc_op");
177
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
178
                                    "cc_src");
179
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
180
                                    "cc_dst");
181
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
182
                                   "cc_vr");
183

    
184
    for (i = 0; i < 16; i++) {
185
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
186
        regs[i] = tcg_global_mem_new(TCG_AREG0,
187
                                     offsetof(CPUS390XState, regs[i]),
188
                                     cpu_reg_names[i]);
189
    }
190

    
191
    for (i = 0; i < 16; i++) {
192
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
193
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
194
                                      offsetof(CPUS390XState, fregs[i].d),
195
                                      cpu_reg_names[i + 16]);
196
    }
197

    
198
    /* register helpers */
199
#define GEN_HELPER 2
200
#include "helper.h"
201
}
202

    
203
static inline TCGv_i64 load_reg(int reg)
204
{
205
    TCGv_i64 r = tcg_temp_new_i64();
206
    tcg_gen_mov_i64(r, regs[reg]);
207
    return r;
208
}
209

    
210
static inline TCGv_i64 load_freg(int reg)
211
{
212
    TCGv_i64 r = tcg_temp_new_i64();
213
    tcg_gen_mov_i64(r, fregs[reg]);
214
    return r;
215
}
216

    
217
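/* Short (32-bit) floats occupy the high half of the 64-bit FP register,
   hence the shift (or TCGV_HIGH access on 32-bit hosts) below.  */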
static inline TCGv_i32 load_freg32(int reg)
218
{
219
    TCGv_i32 r = tcg_temp_new_i32();
220
#if HOST_LONG_BITS == 32
221
    tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
222
#else
223
    tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
224
#endif
225
    return r;
226
}
227

    
228
static inline TCGv_i64 load_freg32_i64(int reg)
229
{
230
    TCGv_i64 r = tcg_temp_new_i64();
231
    tcg_gen_shri_i64(r, fregs[reg], 32);
232
    return r;
233
}
234

    
235
static inline TCGv_i32 load_reg32(int reg)
236
{
237
    TCGv_i32 r = tcg_temp_new_i32();
238
    tcg_gen_trunc_i64_i32(r, regs[reg]);
239
    return r;
240
}
241

    
242
static inline TCGv_i64 load_reg32_i64(int reg)
243
{
244
    TCGv_i64 r = tcg_temp_new_i64();
245
    tcg_gen_ext32s_i64(r, regs[reg]);
246
    return r;
247
}
248

    
249
static inline void store_reg(int reg, TCGv_i64 v)
250
{
251
    tcg_gen_mov_i64(regs[reg], v);
252
}
253

    
254
static inline void store_freg(int reg, TCGv_i64 v)
255
{
256
    tcg_gen_mov_i64(fregs[reg], v);
257
}
258

    
259
static inline void store_reg32(int reg, TCGv_i32 v)
260
{
261
    /* 32 bit register writes keep the upper half */
262
#if HOST_LONG_BITS == 32
263
    tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
264
#else
265
    tcg_gen_deposit_i64(regs[reg], regs[reg],
266
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
267
#endif
268
}
269

    
270
static inline void store_reg32_i64(int reg, TCGv_i64 v)
271
{
272
    /* 32 bit register writes keep the upper half */
273
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
274
}
275

    
276
static inline void store_reg32h_i64(int reg, TCGv_i64 v)
277
{
278
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
279
}
280

    
281
static inline void store_freg32(int reg, TCGv_i32 v)
282
{
283
    /* 32 bit register writes keep the lower half */
284
#if HOST_LONG_BITS == 32
285
    tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
286
#else
287
    tcg_gen_deposit_i64(fregs[reg], fregs[reg],
288
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
289
#endif
290
}
291

    
292
static inline void store_freg32_i64(int reg, TCGv_i64 v)
293
{
294
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
295
}
296

    
297
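/* Fetch the low 64 bits of a 128-bit helper result, which helpers
   return via env->retxl.  */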
static inline void return_low128(TCGv_i64 dest)
298
{
299
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
300
}
301

    
302
static inline void update_psw_addr(DisasContext *s)
303
{
304
    /* psw.addr */
305
    tcg_gen_movi_i64(psw_addr, s->pc);
306
}
307

    
308
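/* Called before helpers that may fault: in system emulation the exception
   path needs an up-to-date PSW address and condition code, so flush both
   to the CPU state first.  */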
static inline void potential_page_fault(DisasContext *s)
309
{
310
#ifndef CONFIG_USER_ONLY
311
    update_psw_addr(s);
312
    gen_op_calc_cc(s);
313
#endif
314
}
315

    
316
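/* Instruction fetch helpers: s390x instructions are 2, 4 or 6 bytes long
   and are read big-endian from the instruction stream.  */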
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
317
{
318
    return (uint64_t)cpu_lduw_code(env, pc);
319
}
320

    
321
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
322
{
323
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
324
}
325

    
326
static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
327
{
328
    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
329
}
330

    
331
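/* Map the address-space-control bits from the TB flags to an MMU index:
   0 for primary, 1 for secondary and 2 for home space.  */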
static inline int get_mem_index(DisasContext *s)
332
{
333
    switch (s->tb->flags & FLAG_MASK_ASC) {
334
    case PSW_ASC_PRIMARY >> 32:
335
        return 0;
336
    case PSW_ASC_SECONDARY >> 32:
337
        return 1;
338
    case PSW_ASC_HOME >> 32:
339
        return 2;
340
    default:
341
        tcg_abort();
342
        break;
343
    }
344
}
345

    
346
static void gen_exception(int excp)
347
{
348
    TCGv_i32 tmp = tcg_const_i32(excp);
349
    gen_helper_exception(cpu_env, tmp);
350
    tcg_temp_free_i32(tmp);
351
}
352

    
353
static void gen_program_exception(DisasContext *s, int code)
354
{
355
    TCGv_i32 tmp;
356

    
357
    /* Remember what pgm exception this was.  */
358
    tmp = tcg_const_i32(code);
359
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
360
    tcg_temp_free_i32(tmp);
361

    
362
    tmp = tcg_const_i32(s->next_pc - s->pc);
363
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
364
    tcg_temp_free_i32(tmp);
365

    
366
    /* Advance past instruction.  */
367
    s->pc = s->next_pc;
368
    update_psw_addr(s);
369

    
370
    /* Save off cc.  */
371
    gen_op_calc_cc(s);
372

    
373
    /* Trigger exception.  */
374
    gen_exception(EXCP_PGM);
375

    
376
    /* End TB here.  */
377
    s->is_jmp = DISAS_EXCP;
378
}
379

    
380
static inline void gen_illegal_opcode(DisasContext *s)
381
{
382
    gen_program_exception(s, PGM_SPECIFICATION);
383
}
384

    
385
static inline void check_privileged(DisasContext *s)
386
{
387
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
388
        gen_program_exception(s, PGM_PRIVILEGED);
389
    }
390
}
391

    
392
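/* Compute an effective address from index, base and displacement,
   applying the 31-bit address mask when not in 64-bit addressing mode.  */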
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
393
{
394
    TCGv_i64 tmp;
395

    
396
    /* 31-bitify the immediate part; register contents are dealt with below */
397
    if (!(s->tb->flags & FLAG_MASK_64)) {
398
        d2 &= 0x7fffffffUL;
399
    }
400

    
401
    if (x2) {
402
        if (d2) {
403
            tmp = tcg_const_i64(d2);
404
            tcg_gen_add_i64(tmp, tmp, regs[x2]);
405
        } else {
406
            tmp = load_reg(x2);
407
        }
408
        if (b2) {
409
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
410
        }
411
    } else if (b2) {
412
        if (d2) {
413
            tmp = tcg_const_i64(d2);
414
            tcg_gen_add_i64(tmp, tmp, regs[b2]);
415
        } else {
416
            tmp = load_reg(b2);
417
        }
418
    } else {
419
        tmp = tcg_const_i64(d2);
420
    }
421

    
422
    /* 31-bit mode mask if there are values loaded from registers */
423
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
424
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
425
    }
426

    
427
    return tmp;
428
}
429

    
430
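/* Set the condition code to a compile-time constant; CC_OP_CONST0..3
   encode the value in s->cc_op and need no further calculation.  */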
static void gen_op_movi_cc(DisasContext *s, uint32_t val)
431
{
432
    s->cc_op = CC_OP_CONST0 + val;
433
}
434

    
435
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
436
{
437
    tcg_gen_discard_i64(cc_src);
438
    tcg_gen_mov_i64(cc_dst, dst);
439
    tcg_gen_discard_i64(cc_vr);
440
    s->cc_op = op;
441
}
442

    
443
static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
444
{
445
    tcg_gen_discard_i64(cc_src);
446
    tcg_gen_extu_i32_i64(cc_dst, dst);
447
    tcg_gen_discard_i64(cc_vr);
448
    s->cc_op = op;
449
}
450

    
451
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
452
                                  TCGv_i64 dst)
453
{
454
    tcg_gen_mov_i64(cc_src, src);
455
    tcg_gen_mov_i64(cc_dst, dst);
456
    tcg_gen_discard_i64(cc_vr);
457
    s->cc_op = op;
458
}
459

    
460
static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
461
                                  TCGv_i32 dst)
462
{
463
    tcg_gen_extu_i32_i64(cc_src, src);
464
    tcg_gen_extu_i32_i64(cc_dst, dst);
465
    tcg_gen_discard_i64(cc_vr);
466
    s->cc_op = op;
467
}
468

    
469
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
470
                                  TCGv_i64 dst, TCGv_i64 vr)
471
{
472
    tcg_gen_mov_i64(cc_src, src);
473
    tcg_gen_mov_i64(cc_dst, dst);
474
    tcg_gen_mov_i64(cc_vr, vr);
475
    s->cc_op = op;
476
}
477

    
478
static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
479
{
480
    gen_op_update1_cc_i32(s, CC_OP_NZ, val);
481
}
482

    
483
static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
484
{
485
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
486
}
487

    
488
static inline void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
489
{
490
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
491
}
492

    
493
static inline void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
494
{
495
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
496
}
497

    
498
static inline void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
499
{
500
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
501
}
502

    
503
static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
504
                          enum cc_op cond)
505
{
506
    gen_op_update2_cc_i32(s, cond, v1, v2);
507
}
508

    
509
static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
510
                          enum cc_op cond)
511
{
512
    gen_op_update2_cc_i64(s, cond, v1, v2);
513
}
514

    
515
static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
516
{
517
    cmp_32(s, v1, v2, CC_OP_LTGT_32);
518
}
519

    
520
static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
521
{
522
    cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
523
}
524

    
525
static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
526
{
527
    /* XXX optimize for the constant? put it in s? */
528
    TCGv_i32 tmp = tcg_const_i32(v2);
529
    cmp_32(s, v1, tmp, CC_OP_LTGT_32);
530
    tcg_temp_free_i32(tmp);
531
}
532

    
533
static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
534
{
535
    TCGv_i32 tmp = tcg_const_i32(v2);
536
    cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
537
    tcg_temp_free_i32(tmp);
538
}
539

    
540
static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
541
{
542
    cmp_64(s, v1, v2, CC_OP_LTGT_64);
543
}
544

    
545
static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
546
{
547
    cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
548
}
549

    
550
static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
551
{
552
    TCGv_i64 tmp = tcg_const_i64(v2);
553
    cmp_s64(s, v1, tmp);
554
    tcg_temp_free_i64(tmp);
555
}
556

    
557
static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
558
{
559
    TCGv_i64 tmp = tcg_const_i64(v2);
560
    cmp_u64(s, v1, tmp);
561
    tcg_temp_free_i64(tmp);
562
}
563

    
564
static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
565
{
566
    gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
567
}
568

    
569
static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
570
{
571
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
572
}
573

    
574
/* CC value is in env->cc_op */
575
static inline void set_cc_static(DisasContext *s)
576
{
577
    tcg_gen_discard_i64(cc_src);
578
    tcg_gen_discard_i64(cc_dst);
579
    tcg_gen_discard_i64(cc_vr);
580
    s->cc_op = CC_OP_STATIC;
581
}
582

    
583
static inline void gen_op_set_cc_op(DisasContext *s)
584
{
585
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
586
        tcg_gen_movi_i32(cc_op, s->cc_op);
587
    }
588
}
589

    
590
static inline void gen_update_cc_op(DisasContext *s)
591
{
592
    gen_op_set_cc_op(s);
593
}
594

    
595
/* calculates cc into cc_op */
596
static void gen_op_calc_cc(DisasContext *s)
597
{
598
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
599
    TCGv_i64 dummy = tcg_const_i64(0);
600

    
601
    switch (s->cc_op) {
602
    case CC_OP_CONST0:
603
    case CC_OP_CONST1:
604
    case CC_OP_CONST2:
605
    case CC_OP_CONST3:
606
        /* s->cc_op is the cc value */
607
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
608
        break;
609
    case CC_OP_STATIC:
610
        /* env->cc_op already is the cc value */
611
        break;
612
    case CC_OP_NZ:
613
    case CC_OP_ABS_64:
614
    case CC_OP_NABS_64:
615
    case CC_OP_ABS_32:
616
    case CC_OP_NABS_32:
617
    case CC_OP_LTGT0_32:
618
    case CC_OP_LTGT0_64:
619
    case CC_OP_COMP_32:
620
    case CC_OP_COMP_64:
621
    case CC_OP_NZ_F32:
622
    case CC_OP_NZ_F64:
623
        /* 1 argument */
624
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
625
        break;
626
    case CC_OP_ICM:
627
    case CC_OP_LTGT_32:
628
    case CC_OP_LTGT_64:
629
    case CC_OP_LTUGTU_32:
630
    case CC_OP_LTUGTU_64:
631
    case CC_OP_TM_32:
632
    case CC_OP_TM_64:
633
    case CC_OP_SLA_32:
634
    case CC_OP_SLA_64:
635
    case CC_OP_NZ_F128:
636
        /* 2 arguments */
637
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
638
        break;
639
    case CC_OP_ADD_64:
640
    case CC_OP_ADDU_64:
641
    case CC_OP_ADDC_64:
642
    case CC_OP_SUB_64:
643
    case CC_OP_SUBU_64:
644
    case CC_OP_SUBB_64:
645
    case CC_OP_ADD_32:
646
    case CC_OP_ADDU_32:
647
    case CC_OP_ADDC_32:
648
    case CC_OP_SUB_32:
649
    case CC_OP_SUBU_32:
650
    case CC_OP_SUBB_32:
651
        /* 3 arguments */
652
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
653
        break;
654
    case CC_OP_DYNAMIC:
655
        /* unknown operation - assume 3 arguments and cc_op in env */
656
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
657
        break;
658
    default:
659
        tcg_abort();
660
    }
661

    
662
    tcg_temp_free_i32(local_cc_op);
663
    tcg_temp_free_i64(dummy);
664

    
665
    /* We now have cc in cc_op as constant */
666
    set_cc_static(s);
667
}
668

    
669
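/* Field decoders for the instructions still handled by the hand-written
   part of the translator: extract the register, index, base and
   displacement fields of the RR, RX, RS and SI formats.  */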
static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
670
{
671
    debug_insn(insn);
672

    
673
    *r1 = (insn >> 4) & 0xf;
674
    *r2 = insn & 0xf;
675
}
676

    
677
static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
678
                                 int *x2, int *b2, int *d2)
679
{
680
    debug_insn(insn);
681

    
682
    *r1 = (insn >> 20) & 0xf;
683
    *x2 = (insn >> 16) & 0xf;
684
    *b2 = (insn >> 12) & 0xf;
685
    *d2 = insn & 0xfff;
686

    
687
    return get_address(s, *x2, *b2, *d2);
688
}
689

    
690
static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
691
                             int *b2, int *d2)
692
{
693
    debug_insn(insn);
694

    
695
    *r1 = (insn >> 20) & 0xf;
696
    /* aka m3 */
697
    *r3 = (insn >> 16) & 0xf;
698
    *b2 = (insn >> 12) & 0xf;
699
    *d2 = insn & 0xfff;
700
}
701

    
702
static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
703
                                 int *b1, int *d1)
704
{
705
    debug_insn(insn);
706

    
707
    *i2 = (insn >> 16) & 0xff;
708
    *b1 = (insn >> 12) & 0xf;
709
    *d1 = insn & 0xfff;
710

    
711
    return get_address(s, 0, *b1, *d1);
712
}
713

    
714
static int use_goto_tb(DisasContext *s, uint64_t dest)
715
{
716
    /* NOTE: we handle the case where the TB spans two pages here */
717
    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
718
             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
719
            && !s->singlestep_enabled
720
            && !(s->tb->cflags & CF_LAST_IO));
721
}
722

    
723
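/* Jump to PC, chaining the TB directly when use_goto_tb allows it,
   otherwise exiting to the main loop with the new PSW address.  */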
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
724
{
725
    gen_update_cc_op(s);
726

    
727
    if (use_goto_tb(s, pc)) {
728
        tcg_gen_goto_tb(tb_num);
729
        tcg_gen_movi_i64(psw_addr, pc);
730
        tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
731
    } else {
732
        /* jump to another page: currently not optimized */
733
        tcg_gen_movi_i64(psw_addr, pc);
734
        tcg_gen_exit_tb(0);
735
    }
736
}
737

    
738
static inline void account_noninline_branch(DisasContext *s, int cc_op)
739
{
740
#ifdef DEBUG_INLINE_BRANCHES
741
    inline_branch_miss[cc_op]++;
742
#endif
743
}
744

    
745
static inline void account_inline_branch(DisasContext *s, int cc_op)
746
{
747
#ifdef DEBUG_INLINE_BRANCHES
748
    inline_branch_hit[cc_op]++;
749
#endif
750
}
751

    
752
/* Table of mask values to comparison codes, given a comparison as input.
753
   For a true comparison CC=3 will never be set, but we treat this
754
   conservatively for possible use when CC=3 indicates overflow.  */
755
static const TCGCond ltgt_cond[16] = {
756
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
757
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
758
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
759
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
760
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
761
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
762
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
763
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
764
};
765

    
766
/* Table of mask values to comparison codes, given a logic op as input.
767
   For such, only CC=0 and CC=1 should be possible.  */
768
static const TCGCond nz_cond[16] = {
769
    /*    |    | x | x */
770
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
771
    /*    | NE | x | x */
772
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
773
    /* EQ |    | x | x */
774
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
775
    /* EQ | NE | x | x */
776
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
777
};
778

    
779
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
780
   details required to generate a TCG comparison.  */
781
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
782
{
783
    TCGCond cond;
784
    enum cc_op old_cc_op = s->cc_op;
785

    
786
    if (mask == 15 || mask == 0) {
787
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
788
        c->u.s32.a = cc_op;
789
        c->u.s32.b = cc_op;
790
        c->g1 = c->g2 = true;
791
        c->is_64 = false;
792
        return;
793
    }
794

    
795
    /* Find the TCG condition for the mask + cc op.  */
796
    switch (old_cc_op) {
797
    case CC_OP_LTGT0_32:
798
    case CC_OP_LTGT0_64:
799
    case CC_OP_LTGT_32:
800
    case CC_OP_LTGT_64:
801
        cond = ltgt_cond[mask];
802
        if (cond == TCG_COND_NEVER) {
803
            goto do_dynamic;
804
        }
805
        account_inline_branch(s, old_cc_op);
806
        break;
807

    
808
    case CC_OP_LTUGTU_32:
809
    case CC_OP_LTUGTU_64:
810
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
811
        if (cond == TCG_COND_NEVER) {
812
            goto do_dynamic;
813
        }
814
        account_inline_branch(s, old_cc_op);
815
        break;
816

    
817
    case CC_OP_NZ:
818
        cond = nz_cond[mask];
819
        if (cond == TCG_COND_NEVER) {
820
            goto do_dynamic;
821
        }
822
        account_inline_branch(s, old_cc_op);
823
        break;
824

    
825
    case CC_OP_TM_32:
826
    case CC_OP_TM_64:
827
        switch (mask) {
828
        case 8:
829
            cond = TCG_COND_EQ;
830
            break;
831
        case 4 | 2 | 1:
832
            cond = TCG_COND_NE;
833
            break;
834
        default:
835
            goto do_dynamic;
836
        }
837
        account_inline_branch(s, old_cc_op);
838
        break;
839

    
840
    case CC_OP_ICM:
841
        switch (mask) {
842
        case 8:
843
            cond = TCG_COND_EQ;
844
            break;
845
        case 4 | 2 | 1:
846
        case 4 | 2:
847
            cond = TCG_COND_NE;
848
            break;
849
        default:
850
            goto do_dynamic;
851
        }
852
        account_inline_branch(s, old_cc_op);
853
        break;
854

    
855
    default:
856
    do_dynamic:
857
        /* Calculate cc value.  */
858
        gen_op_calc_cc(s);
859
        /* FALLTHRU */
860

    
861
    case CC_OP_STATIC:
862
        /* Jump based on CC.  We'll load up the real cond below;
863
           the assignment here merely avoids a compiler warning.  */
864
        account_noninline_branch(s, old_cc_op);
865
        old_cc_op = CC_OP_STATIC;
866
        cond = TCG_COND_NEVER;
867
        break;
868
    }
869

    
870
    /* Load up the arguments of the comparison.  */
871
    c->is_64 = true;
872
    c->g1 = c->g2 = false;
873
    switch (old_cc_op) {
874
    case CC_OP_LTGT0_32:
875
        c->is_64 = false;
876
        c->u.s32.a = tcg_temp_new_i32();
877
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
878
        c->u.s32.b = tcg_const_i32(0);
879
        break;
880
    case CC_OP_LTGT_32:
881
    case CC_OP_LTUGTU_32:
882
        c->is_64 = false;
883
        c->u.s32.a = tcg_temp_new_i32();
884
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
885
        c->u.s32.b = tcg_temp_new_i32();
886
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
887
        break;
888

    
889
    case CC_OP_LTGT0_64:
890
    case CC_OP_NZ:
891
        c->u.s64.a = cc_dst;
892
        c->u.s64.b = tcg_const_i64(0);
893
        c->g1 = true;
894
        break;
895
    case CC_OP_LTGT_64:
896
    case CC_OP_LTUGTU_64:
897
        c->u.s64.a = cc_src;
898
        c->u.s64.b = cc_dst;
899
        c->g1 = c->g2 = true;
900
        break;
901

    
902
    case CC_OP_TM_32:
903
    case CC_OP_TM_64:
904
    case CC_OP_ICM:
905
        c->u.s64.a = tcg_temp_new_i64();
906
        c->u.s64.b = tcg_const_i64(0);
907
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
908
        break;
909

    
910
    case CC_OP_STATIC:
911
        c->is_64 = false;
912
        c->u.s32.a = cc_op;
913
        c->g1 = true;
914
        switch (mask) {
915
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
916
            cond = TCG_COND_NE;
917
            c->u.s32.b = tcg_const_i32(3);
918
            break;
919
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
920
            cond = TCG_COND_NE;
921
            c->u.s32.b = tcg_const_i32(2);
922
            break;
923
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
924
            cond = TCG_COND_NE;
925
            c->u.s32.b = tcg_const_i32(1);
926
            break;
927
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
928
            cond = TCG_COND_EQ;
929
            c->g1 = false;
930
            c->u.s32.a = tcg_temp_new_i32();
931
            c->u.s32.b = tcg_const_i32(0);
932
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
933
            break;
934
        case 0x8 | 0x4: /* cc < 2 */
935
            cond = TCG_COND_LTU;
936
            c->u.s32.b = tcg_const_i32(2);
937
            break;
938
        case 0x8: /* cc == 0 */
939
            cond = TCG_COND_EQ;
940
            c->u.s32.b = tcg_const_i32(0);
941
            break;
942
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
943
            cond = TCG_COND_NE;
944
            c->u.s32.b = tcg_const_i32(0);
945
            break;
946
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
947
            cond = TCG_COND_NE;
948
            c->g1 = false;
949
            c->u.s32.a = tcg_temp_new_i32();
950
            c->u.s32.b = tcg_const_i32(0);
951
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
952
            break;
953
        case 0x4: /* cc == 1 */
954
            cond = TCG_COND_EQ;
955
            c->u.s32.b = tcg_const_i32(1);
956
            break;
957
        case 0x2 | 0x1: /* cc > 1 */
958
            cond = TCG_COND_GTU;
959
            c->u.s32.b = tcg_const_i32(1);
960
            break;
961
        case 0x2: /* cc == 2 */
962
            cond = TCG_COND_EQ;
963
            c->u.s32.b = tcg_const_i32(2);
964
            break;
965
        case 0x1: /* cc == 3 */
966
            cond = TCG_COND_EQ;
967
            c->u.s32.b = tcg_const_i32(3);
968
            break;
969
        default:
970
            /* CC is masked by something else: (8 >> cc) & mask.  */
971
            cond = TCG_COND_NE;
972
            c->g1 = false;
973
            c->u.s32.a = tcg_const_i32(8);
974
            c->u.s32.b = tcg_const_i32(0);
975
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
976
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
977
            break;
978
        }
979
        break;
980

    
981
    default:
982
        abort();
983
    }
984
    c->cond = cond;
985
}
986

    
987
static void free_compare(DisasCompare *c)
988
{
989
    if (!c->g1) {
990
        if (c->is_64) {
991
            tcg_temp_free_i64(c->u.s64.a);
992
        } else {
993
            tcg_temp_free_i32(c->u.s32.a);
994
        }
995
    }
996
    if (!c->g2) {
997
        if (c->is_64) {
998
            tcg_temp_free_i64(c->u.s64.b);
999
        } else {
1000
            tcg_temp_free_i32(c->u.s32.b);
1001
        }
1002
    }
1003
}
1004

    
1005
static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
1006
                     uint32_t insn)
1007
{
1008
    TCGv_i64 tmp, tmp2, tmp3;
1009
    TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
1010
    int r1, r2;
1011
#ifndef CONFIG_USER_ONLY
1012
    int r3, d2, b2;
1013
#endif
1014

    
1015
    r1 = (insn >> 4) & 0xf;
1016
    r2 = insn & 0xf;
1017

    
1018
    LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);
1019

    
1020
    switch (op) {
1021
    case 0x22: /* IPM    R1               [RRE] */
1022
        tmp32_1 = tcg_const_i32(r1);
1023
        gen_op_calc_cc(s);
1024
        gen_helper_ipm(cpu_env, cc_op, tmp32_1);
1025
        tcg_temp_free_i32(tmp32_1);
1026
        break;
1027
    case 0x41: /* CKSM    R1,R2     [RRE] */
1028
        tmp32_1 = tcg_const_i32(r1);
1029
        tmp32_2 = tcg_const_i32(r2);
1030
        potential_page_fault(s);
1031
        gen_helper_cksm(cpu_env, tmp32_1, tmp32_2);
1032
        tcg_temp_free_i32(tmp32_1);
1033
        tcg_temp_free_i32(tmp32_2);
1034
        gen_op_movi_cc(s, 0);
1035
        break;
1036
    case 0x4e: /* SAR     R1,R2     [RRE] */
1037
        tmp32_1 = load_reg32(r2);
1038
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r1]));
1039
        tcg_temp_free_i32(tmp32_1);
1040
        break;
1041
    case 0x4f: /* EAR     R1,R2     [RRE] */
1042
        tmp32_1 = tcg_temp_new_i32();
1043
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, aregs[r2]));
1044
        store_reg32(r1, tmp32_1);
1045
        tcg_temp_free_i32(tmp32_1);
1046
        break;
1047
    case 0x54: /* MVPG     R1,R2     [RRE] */
1048
        tmp = load_reg(0);
1049
        tmp2 = load_reg(r1);
1050
        tmp3 = load_reg(r2);
1051
        potential_page_fault(s);
1052
        gen_helper_mvpg(cpu_env, tmp, tmp2, tmp3);
1053
        tcg_temp_free_i64(tmp);
1054
        tcg_temp_free_i64(tmp2);
1055
        tcg_temp_free_i64(tmp3);
1056
        /* XXX check CCO bit and set CC accordingly */
1057
        gen_op_movi_cc(s, 0);
1058
        break;
1059
    case 0x55: /* MVST     R1,R2     [RRE] */
1060
        tmp32_1 = load_reg32(0);
1061
        tmp32_2 = tcg_const_i32(r1);
1062
        tmp32_3 = tcg_const_i32(r2);
1063
        potential_page_fault(s);
1064
        gen_helper_mvst(cpu_env, tmp32_1, tmp32_2, tmp32_3);
1065
        tcg_temp_free_i32(tmp32_1);
1066
        tcg_temp_free_i32(tmp32_2);
1067
        tcg_temp_free_i32(tmp32_3);
1068
        gen_op_movi_cc(s, 1);
1069
        break;
1070
    case 0x5d: /* CLST     R1,R2     [RRE] */
1071
        tmp32_1 = load_reg32(0);
1072
        tmp32_2 = tcg_const_i32(r1);
1073
        tmp32_3 = tcg_const_i32(r2);
1074
        potential_page_fault(s);
1075
        gen_helper_clst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1076
        set_cc_static(s);
1077
        tcg_temp_free_i32(tmp32_1);
1078
        tcg_temp_free_i32(tmp32_2);
1079
        tcg_temp_free_i32(tmp32_3);
1080
        break;
1081
    case 0x5e: /* SRST     R1,R2     [RRE] */
1082
        tmp32_1 = load_reg32(0);
1083
        tmp32_2 = tcg_const_i32(r1);
1084
        tmp32_3 = tcg_const_i32(r2);
1085
        potential_page_fault(s);
1086
        gen_helper_srst(cc_op, cpu_env, tmp32_1, tmp32_2, tmp32_3);
1087
        set_cc_static(s);
1088
        tcg_temp_free_i32(tmp32_1);
1089
        tcg_temp_free_i32(tmp32_2);
1090
        tcg_temp_free_i32(tmp32_3);
1091
        break;
1092

    
1093
#ifndef CONFIG_USER_ONLY
1094
    case 0x02: /* STIDP     D2(B2)     [S] */
1095
        /* Store CPU ID */
1096
        check_privileged(s);
1097
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
1098
        tmp = get_address(s, 0, b2, d2);
1099
        potential_page_fault(s);
1100
        gen_helper_stidp(cpu_env, tmp);
1101
        tcg_temp_free_i64(tmp);
1102
        break;
1103
    case 0x04: /* SCK       D2(B2)     [S] */
1104
        /* Set Clock */
1105
        check_privileged(s);
1106
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
1107
        tmp = get_address(s, 0, b2, d2);
1108
        potential_page_fault(s);
1109
        gen_helper_sck(cc_op, tmp);
1110
        set_cc_static(s);
1111
        tcg_temp_free_i64(tmp);
1112
        break;
1113
    case 0x05: /* STCK     D2(B2)     [S] */
1114
        /* Store Clock */
1115
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
1116
        tmp = get_address(s, 0, b2, d2);
1117
        potential_page_fault(s);
1118
        gen_helper_stck(cc_op, cpu_env, tmp);
1119
        set_cc_static(s);
1120
        tcg_temp_free_i64(tmp);
1121
        break;
1122
    case 0x06: /* SCKC     D2(B2)     [S] */
1123
        /* Set Clock Comparator */
1124
        check_privileged(s);
1125
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
1126
        tmp = get_address(s, 0, b2, d2);
1127
        potential_page_fault(s);
1128
        gen_helper_sckc(cpu_env, tmp);
1129
        tcg_temp_free_i64(tmp);
1130
        break;
1131
    case 0x07: /* STCKC    D2(B2)     [S] */
1132
        /* Store Clock Comparator */
1133
        check_privileged(s);
1134
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
1135
        tmp = get_address(s, 0, b2, d2);
1136
        potential_page_fault(s);
1137
        gen_helper_stckc(cpu_env, tmp);
1138
        tcg_temp_free_i64(tmp);
1139
        break;
1140
    case 0x08: /* SPT      D2(B2)     [S] */
1141
        /* Set CPU Timer */
1142
        check_privileged(s);
1143
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
1144
        tmp = get_address(s, 0, b2, d2);
1145
        potential_page_fault(s);
1146
        gen_helper_spt(cpu_env, tmp);
1147
        tcg_temp_free_i64(tmp);
1148
        break;
1149
    case 0x09: /* STPT     D2(B2)     [S] */
1150
        /* Store CPU Timer */
1151
        check_privileged(s);
1152
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
1153
        tmp = get_address(s, 0, b2, d2);
1154
        potential_page_fault(s);
1155
        gen_helper_stpt(cpu_env, tmp);
1156
        tcg_temp_free_i64(tmp);
1157
        break;
1158
    case 0x0a: /* SPKA     D2(B2)     [S] */
1159
        /* Set PSW Key from Address */
1160
        check_privileged(s);
1161
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
1162
        tmp = get_address(s, 0, b2, d2);
1163
        tmp2 = tcg_temp_new_i64();
1164
        tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY);
1165
        tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4);
1166
        tcg_gen_or_i64(psw_mask, tmp2, tmp);
1167
        tcg_temp_free_i64(tmp2);
1168
        tcg_temp_free_i64(tmp);
1169
        break;
1170
    case 0x0d: /* PTLB                [S] */
1171
        /* Purge TLB */
1172
        check_privileged(s);
1173
        gen_helper_ptlb(cpu_env);
1174
        break;
1175
    case 0x10: /* SPX      D2(B2)     [S] */
1176
        /* Set Prefix Register */
1177
        check_privileged(s);
1178
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
1179
        tmp = get_address(s, 0, b2, d2);
1180
        potential_page_fault(s);
1181
        gen_helper_spx(cpu_env, tmp);
1182
        tcg_temp_free_i64(tmp);
1183
        break;
1184
    case 0x11: /* STPX     D2(B2)     [S] */
1185
        /* Store Prefix */
1186
        check_privileged(s);
1187
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
1188
        tmp = get_address(s, 0, b2, d2);
1189
        tmp2 = tcg_temp_new_i64();
1190
        tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUS390XState, psa));
1191
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1192
        tcg_temp_free_i64(tmp);
1193
        tcg_temp_free_i64(tmp2);
1194
        break;
1195
    case 0x12: /* STAP     D2(B2)     [S] */
1196
        /* Store CPU Address */
1197
        check_privileged(s);
1198
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
1199
        tmp = get_address(s, 0, b2, d2);
1200
        tmp2 = tcg_temp_new_i64();
1201
        tmp32_1 = tcg_temp_new_i32();
1202
        tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, cpu_num));
1203
        tcg_gen_extu_i32_i64(tmp2, tmp32_1);
1204
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1205
        tcg_temp_free_i64(tmp);
1206
        tcg_temp_free_i64(tmp2);
1207
        tcg_temp_free_i32(tmp32_1);
1208
        break;
1209
    case 0x21: /* IPTE     R1,R2      [RRE] */
1210
        /* Invalidate PTE */
1211
        check_privileged(s);
1212
        r1 = (insn >> 4) & 0xf;
1213
        r2 = insn & 0xf;
1214
        tmp = load_reg(r1);
1215
        tmp2 = load_reg(r2);
1216
        gen_helper_ipte(cpu_env, tmp, tmp2);
1217
        tcg_temp_free_i64(tmp);
1218
        tcg_temp_free_i64(tmp2);
1219
        break;
1220
    case 0x29: /* ISKE     R1,R2      [RRE] */
1221
        /* Insert Storage Key Extended */
1222
        check_privileged(s);
1223
        r1 = (insn >> 4) & 0xf;
1224
        r2 = insn & 0xf;
1225
        tmp = load_reg(r2);
1226
        tmp2 = tcg_temp_new_i64();
1227
        gen_helper_iske(tmp2, cpu_env, tmp);
1228
        store_reg(r1, tmp2);
1229
        tcg_temp_free_i64(tmp);
1230
        tcg_temp_free_i64(tmp2);
1231
        break;
1232
    case 0x2a: /* RRBE     R1,R2      [RRE] */
1233
        /* Reset Reference Bit Extended */
1234
        check_privileged(s);
1235
        r1 = (insn >> 4) & 0xf;
1236
        r2 = insn & 0xf;
1237
        tmp32_1 = load_reg32(r1);
1238
        tmp = load_reg(r2);
1239
        gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
1240
        set_cc_static(s);
1241
        tcg_temp_free_i32(tmp32_1);
1242
        tcg_temp_free_i64(tmp);
1243
        break;
1244
    case 0x2b: /* SSKE     R1,R2      [RRE] */
1245
        /* Set Storage Key Extended */
1246
        check_privileged(s);
1247
        r1 = (insn >> 4) & 0xf;
1248
        r2 = insn & 0xf;
1249
        tmp32_1 = load_reg32(r1);
1250
        tmp = load_reg(r2);
1251
        gen_helper_sske(cpu_env, tmp32_1, tmp);
1252
        tcg_temp_free_i32(tmp32_1);
1253
        tcg_temp_free_i64(tmp);
1254
        break;
1255
    case 0x34: /* STCH ? */
1256
        /* Store Subchannel */
1257
        check_privileged(s);
1258
        gen_op_movi_cc(s, 3);
1259
        break;
1260
    case 0x46: /* STURA    R1,R2      [RRE] */
1261
        /* Store Using Real Address */
1262
        check_privileged(s);
1263
        r1 = (insn >> 4) & 0xf;
1264
        r2 = insn & 0xf;
1265
        tmp32_1 = load_reg32(r1);
1266
        tmp = load_reg(r2);
1267
        potential_page_fault(s);
1268
        gen_helper_stura(cpu_env, tmp, tmp32_1);
1269
        tcg_temp_free_i32(tmp32_1);
1270
        tcg_temp_free_i64(tmp);
1271
        break;
1272
    case 0x50: /* CSP      R1,R2      [RRE] */
1273
        /* Compare And Swap And Purge */
1274
        check_privileged(s);
1275
        r1 = (insn >> 4) & 0xf;
1276
        r2 = insn & 0xf;
1277
        tmp32_1 = tcg_const_i32(r1);
1278
        tmp32_2 = tcg_const_i32(r2);
1279
        gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
1280
        set_cc_static(s);
1281
        tcg_temp_free_i32(tmp32_1);
1282
        tcg_temp_free_i32(tmp32_2);
1283
        break;
1284
    case 0x5f: /* CHSC ? */
1285
        /* Channel Subsystem Call */
1286
        check_privileged(s);
1287
        gen_op_movi_cc(s, 3);
1288
        break;
1289
    case 0x78: /* STCKE    D2(B2)     [S] */
1290
        /* Store Clock Extended */
1291
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
1292
        tmp = get_address(s, 0, b2, d2);
1293
        potential_page_fault(s);
1294
        gen_helper_stcke(cc_op, cpu_env, tmp);
1295
        set_cc_static(s);
1296
        tcg_temp_free_i64(tmp);
1297
        break;
1298
    case 0x79: /* SACF    D2(B2)     [S] */
1299
        /* Set Address Space Control Fast */
1300
        check_privileged(s);
1301
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
1302
        tmp = get_address(s, 0, b2, d2);
1303
        potential_page_fault(s);
1304
        gen_helper_sacf(cpu_env, tmp);
1305
        tcg_temp_free_i64(tmp);
1306
        /* addressing mode has changed, so end the block */
1307
        s->pc = s->next_pc;
1308
        update_psw_addr(s);
1309
        s->is_jmp = DISAS_JUMP;
1310
        break;
1311
    case 0x7d: /* STSI     D2(B2)     [S] */
1312
        check_privileged(s);
1313
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
1314
        tmp = get_address(s, 0, b2, d2);
1315
        tmp32_1 = load_reg32(0);
1316
        tmp32_2 = load_reg32(1);
1317
        potential_page_fault(s);
1318
        gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
1319
        set_cc_static(s);
1320
        tcg_temp_free_i64(tmp);
1321
        tcg_temp_free_i32(tmp32_1);
1322
        tcg_temp_free_i32(tmp32_2);
1323
        break;
1324
    case 0x9d: /* LFPC      D2(B2)   [S] */
1325
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
1326
        tmp = get_address(s, 0, b2, d2);
1327
        tmp2 = tcg_temp_new_i64();
1328
        tmp32_1 = tcg_temp_new_i32();
1329
        tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
1330
        tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
1331
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
1332
        tcg_temp_free_i64(tmp);
1333
        tcg_temp_free_i64(tmp2);
1334
        tcg_temp_free_i32(tmp32_1);
1335
        break;
1336
    case 0xb1: /* STFL     D2(B2)     [S] */
1337
        /* Store Facility List (CPU features) at 200 */
1338
        check_privileged(s);
1339
        tmp2 = tcg_const_i64(0xc0000000);
1340
        tmp = tcg_const_i64(200);
1341
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
1342
        tcg_temp_free_i64(tmp2);
1343
        tcg_temp_free_i64(tmp);
1344
        break;
1345
    case 0xb2: /* LPSWE    D2(B2)     [S] */
1346
        /* Load PSW Extended */
1347
        check_privileged(s);
1348
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
1349
        tmp = get_address(s, 0, b2, d2);
1350
        tmp2 = tcg_temp_new_i64();
1351
        tmp3 = tcg_temp_new_i64();
1352
        tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
1353
        tcg_gen_addi_i64(tmp, tmp, 8);
1354
        tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
1355
        gen_helper_load_psw(cpu_env, tmp2, tmp3);
1356
        /* we need to keep cc_op intact */
1357
        s->is_jmp = DISAS_JUMP;
1358
        tcg_temp_free_i64(tmp);
1359
        tcg_temp_free_i64(tmp2);
1360
        tcg_temp_free_i64(tmp3);
1361
        break;
1362
    case 0x20: /* SERVC     R1,R2     [RRE] */
1363
        /* SCLP Service call (PV hypercall) */
1364
        check_privileged(s);
1365
        potential_page_fault(s);
1366
        tmp32_1 = load_reg32(r2);
1367
        tmp = load_reg(r1);
1368
        gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
1369
        set_cc_static(s);
1370
        tcg_temp_free_i32(tmp32_1);
1371
        tcg_temp_free_i64(tmp);
1372
        break;
1373
#endif
1374
    default:
1375
        LOG_DISAS("illegal b2 operation 0x%x\n", op);
1376
        gen_illegal_opcode(s);
1377
        break;
1378
    }
1379
}
1380

    
1381
static void disas_b3(CPUS390XState *env, DisasContext *s, int op, int m3,
1382
                     int r1, int r2)
1383
{
1384
    TCGv_i32 tmp32_1;
1385
    LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2);
1386
#define FP_HELPER(i) \
1387
    tmp32_1 = tcg_const_i32(r1); \
1388
    tmp32_2 = tcg_const_i32(r2); \
1389
    gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
1390
    tcg_temp_free_i32(tmp32_1); \
1391
    tcg_temp_free_i32(tmp32_2);
1392

    
1393
#define FP_HELPER_CC(i) \
1394
    tmp32_1 = tcg_const_i32(r1); \
1395
    tmp32_2 = tcg_const_i32(r2); \
1396
    gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
1397
    set_cc_static(s); \
1398
    tcg_temp_free_i32(tmp32_1); \
1399
    tcg_temp_free_i32(tmp32_2);
1400

    
1401
    switch (op) {
1402
    case 0x84: /* SFPC        R1                [RRE] */
1403
        tmp32_1 = load_reg32(r1);
1404
        tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUS390XState, fpc));
1405
        tcg_temp_free_i32(tmp32_1);
1406
        break;
1407
    default:
1408
        LOG_DISAS("illegal b3 operation 0x%x\n", op);
1409
        gen_illegal_opcode(s);
1410
        break;
1411
    }
1412

    
1413
#undef FP_HELPER_CC
1414
#undef FP_HELPER
1415
}
1416

    
1417
static void disas_b9(CPUS390XState *env, DisasContext *s, int op, int r1,
1418
                     int r2)
1419
{
1420
    TCGv_i64 tmp;
1421
    TCGv_i32 tmp32_1;
1422

    
1423
    LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2);
1424
    switch (op) {
1425
    case 0x83: /* FLOGR R1,R2 [RRE] */
1426
        tmp = load_reg(r2);
1427
        tmp32_1 = tcg_const_i32(r1);
1428
        gen_helper_flogr(cc_op, cpu_env, tmp32_1, tmp);
1429
        set_cc_static(s);
1430
        tcg_temp_free_i64(tmp);
1431
        tcg_temp_free_i32(tmp32_1);
1432
        break;
1433
    default:
1434
        LOG_DISAS("illegal b9 operation 0x%x\n", op);
1435
        gen_illegal_opcode(s);
1436
        break;
1437
    }
1438
}
1439

    
1440
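/* Entry point of the hand-written decoder: dispatch the remaining 0xb2,
   0xb3 and 0xb9 opcode groups; anything else is logged as unimplemented
   and raises an illegal-opcode exception.  */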
static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
1441
{
1442
    unsigned char opc;
1443
    uint64_t insn;
1444
    int op, r1, r2, r3;
1445

    
1446
    opc = cpu_ldub_code(env, s->pc);
1447
    LOG_DISAS("opc 0x%x\n", opc);
1448

    
1449
    switch (opc) {
1450
    case 0xb2:
1451
        insn = ld_code4(env, s->pc);
1452
        op = (insn >> 16) & 0xff;
1453
        disas_b2(env, s, op, insn);
1454
        break;
1455
    case 0xb3:
1456
        insn = ld_code4(env, s->pc);
1457
        op = (insn >> 16) & 0xff;
1458
        r3 = (insn >> 12) & 0xf; /* aka m3 */
1459
        r1 = (insn >> 4) & 0xf;
1460
        r2 = insn & 0xf;
1461
        disas_b3(env, s, op, r3, r1, r2);
1462
        break;
1463
    case 0xb9:
1464
        insn = ld_code4(env, s->pc);
1465
        r1 = (insn >> 4) & 0xf;
1466
        r2 = insn & 0xf;
1467
        op = (insn >> 16) & 0xff;
1468
        disas_b9(env, s, op, r1, r2);
1469
        break;
1470
    default:
1471
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
1472
        gen_illegal_opcode(s);
1473
        break;
1474
    }
1475
}
1476

    
1477
/* ====================================================================== */
1478
/* Define the insn format enumeration.  */
1479
#define F0(N)                         FMT_##N,
1480
#define F1(N, X1)                     F0(N)
1481
#define F2(N, X1, X2)                 F0(N)
1482
#define F3(N, X1, X2, X3)             F0(N)
1483
#define F4(N, X1, X2, X3, X4)         F0(N)
1484
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
1485

    
1486
typedef enum {
1487
#include "insn-format.def"
1488
} DisasFormat;
1489

    
1490
#undef F0
1491
#undef F1
1492
#undef F2
1493
#undef F3
1494
#undef F4
1495
#undef F5
1496

    
1497
/* Define a structure to hold the decoded fields.  We'll store each inside
1498
   an array indexed by an enum.  In order to conserve memory, we'll arrange
1499
   for fields that do not exist at the same time to overlap, thus the "C"
1500
   for compact.  For checking purposes there is an "O" for original index
1501
   as well that will be applied to availability bitmaps.  */
1502

    
1503
enum DisasFieldIndexO {
1504
    FLD_O_r1,
1505
    FLD_O_r2,
1506
    FLD_O_r3,
1507
    FLD_O_m1,
1508
    FLD_O_m3,
1509
    FLD_O_m4,
1510
    FLD_O_b1,
1511
    FLD_O_b2,
1512
    FLD_O_b4,
1513
    FLD_O_d1,
1514
    FLD_O_d2,
1515
    FLD_O_d4,
1516
    FLD_O_x2,
1517
    FLD_O_l1,
1518
    FLD_O_l2,
1519
    FLD_O_i1,
1520
    FLD_O_i2,
1521
    FLD_O_i3,
1522
    FLD_O_i4,
1523
    FLD_O_i5
1524
};
1525

    
1526
enum DisasFieldIndexC {
1527
    FLD_C_r1 = 0,
1528
    FLD_C_m1 = 0,
1529
    FLD_C_b1 = 0,
1530
    FLD_C_i1 = 0,
1531

    
1532
    FLD_C_r2 = 1,
1533
    FLD_C_b2 = 1,
1534
    FLD_C_i2 = 1,
1535

    
1536
    FLD_C_r3 = 2,
1537
    FLD_C_m3 = 2,
1538
    FLD_C_i3 = 2,
1539

    
1540
    FLD_C_m4 = 3,
1541
    FLD_C_b4 = 3,
1542
    FLD_C_i4 = 3,
1543
    FLD_C_l1 = 3,
1544

    
1545
    FLD_C_i5 = 4,
1546
    FLD_C_d1 = 4,
1547

    
1548
    FLD_C_d2 = 5,
1549

    
1550
    FLD_C_d4 = 6,
1551
    FLD_C_x2 = 6,
1552
    FLD_C_l2 = 6,
1553

    
1554
    NUM_C_FIELD = 7
1555
};
1556

    
1557
struct DisasFields {
1558
    unsigned op:8;
1559
    unsigned op2:8;
1560
    unsigned presentC:16;
1561
    unsigned int presentO;
1562
    int c[NUM_C_FIELD];
1563
};
1564

    
1565
/* This is the way fields are to be accessed out of DisasFields.  */
1566
#define have_field(S, F)  have_field1((S), FLD_O_##F)
1567
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
1568

    
1569
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1570
{
1571
    return (f->presentO >> c) & 1;
1572
}
1573

    
1574
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1575
                      enum DisasFieldIndexC c)
1576
{
1577
    assert(have_field1(f, o));
1578
    return f->c[c];
1579
}
1580

    
1581
/* Describe the layout of each field in each format.  */
1582
typedef struct DisasField {
1583
    unsigned int beg:8;
1584
    unsigned int size:8;
1585
    unsigned int type:2;
1586
    unsigned int indexC:6;
1587
    enum DisasFieldIndexO indexO:8;
1588
} DisasField;
1589

    
1590
typedef struct DisasFormatInfo {
1591
    DisasField op[NUM_C_FIELD];
1592
} DisasFormatInfo;
1593

    
1594
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
1595
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
1596
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1597
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1598
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1599
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
1600
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1601
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1602
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1603
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1604
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
1605
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1606
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
1607
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
1608

    
1609
#define F0(N)                     { { } },
1610
#define F1(N, X1)                 { { X1 } },
1611
#define F2(N, X1, X2)             { { X1, X2 } },
1612
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
1613
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
1614
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1615

    
1616
static const DisasFormatInfo format_info[] = {
1617
#include "insn-format.def"
1618
};
1619

    
1620
#undef F0
1621
#undef F1
1622
#undef F2
1623
#undef F3
1624
#undef F4
1625
#undef F5
1626
#undef R
1627
#undef M
1628
#undef BD
1629
#undef BXD
1630
#undef BDL
1631
#undef BXDL
1632
#undef I
1633
#undef L
1634

    
1635
/* Generally, we'll extract operands into this structure, operate upon
1636
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
1637
   of routines below for more details.  */
1638
typedef struct {
1639
    bool g_out, g_out2, g_in1, g_in2;
1640
    TCGv_i64 out, out2, in1, in2;
1641
    TCGv_i64 addr1;
1642
} DisasOps;
1643

    
1644
/* Return values from translate_one, indicating the state of the TB.  */
1645
typedef enum {
1646
    /* Continue the TB.  */
1647
    NO_EXIT,
1648
    /* We have emitted one or more goto_tb.  No fixup required.  */
1649
    EXIT_GOTO_TB,
1650
    /* We are not using a goto_tb (for whatever reason), but have updated
1651
       the PC (for whatever reason), so there's no need to do it again on
1652
       exiting the TB.  */
1653
    EXIT_PC_UPDATED,
1654
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
1655
       updated the PC for the next instruction to be executed.  */
1656
    EXIT_PC_STALE,
1657
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
1658
       No following code will be executed.  */
1659
    EXIT_NORETURN,
1660
} ExitStatus;
1661

    
1662
typedef enum DisasFacility {
1663
    FAC_Z,                  /* zarch (default) */
1664
    FAC_CASS,               /* compare and swap and store */
1665
    FAC_CASS2,              /* compare and swap and store 2 */
1666
    FAC_DFP,                /* decimal floating point */
1667
    FAC_DFPR,               /* decimal floating point rounding */
1668
    FAC_DO,                 /* distinct operands */
1669
    FAC_EE,                 /* execute extensions */
1670
    FAC_EI,                 /* extended immediate */
1671
    FAC_FPE,                /* floating point extension */
1672
    FAC_FPSSH,              /* floating point support sign handling */
1673
    FAC_FPRGR,              /* FPR-GR transfer */
1674
    FAC_GIE,                /* general instructions extension */
1675
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
1676
    FAC_HW,                 /* high-word */
1677
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
1678
    FAC_LOC,                /* load/store on condition */
1679
    FAC_LD,                 /* long displacement */
1680
    FAC_PC,                 /* population count */
1681
    FAC_SCF,                /* store clock fast */
1682
    FAC_SFLE,               /* store facility list extended */
1683
} DisasFacility;
1684

    
1685
struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:6;
    DisasFacility fac:6;

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        gen_update_cc_op(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((tcg_target_long)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    int lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            gen_update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            gen_update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1866
{
1867
    gen_helper_abs_i64(o->out, o->in2);
1868
    return NO_EXIT;
1869
}
1870

    
1871
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1872
{
1873
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1874
    return NO_EXIT;
1875
}
1876

    
1877
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1878
{
1879
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1880
    return NO_EXIT;
1881
}
1882

    
1883
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1884
{
1885
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1886
    tcg_gen_mov_i64(o->out2, o->in2);
1887
    return NO_EXIT;
1888
}
1889

    
1890
static ExitStatus op_add(DisasContext *s, DisasOps *o)
1891
{
1892
    tcg_gen_add_i64(o->out, o->in1, o->in2);
1893
    return NO_EXIT;
1894
}
1895

    
1896
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1897
{
1898
    TCGv_i64 cc;
1899

    
1900
    tcg_gen_add_i64(o->out, o->in1, o->in2);
1901

    
1902
    /* XXX possible optimization point */
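    /* The carry from a previous ADD LOGICAL is bit 1 of its condition
       code (cc values 2 and 3), so cc >> 1 extracts the carry to add.  */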
1903
    gen_op_calc_cc(s);
1904
    cc = tcg_temp_new_i64();
1905
    tcg_gen_extu_i32_i64(cc, cc_op);
1906
    tcg_gen_shri_i64(cc, cc, 1);
1907

    
1908
    tcg_gen_add_i64(o->out, o->out, cc);
1909
    tcg_temp_free_i64(cc);
1910
    return NO_EXIT;
1911
}
1912

    
1913
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1914
{
1915
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1916
    return NO_EXIT;
1917
}
1918

    
1919
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1920
{
1921
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1922
    return NO_EXIT;
1923
}
1924

    
1925
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1926
{
1927
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1928
    return_low128(o->out2);
1929
    return NO_EXIT;
1930
}
1931

    
1932
static ExitStatus op_and(DisasContext *s, DisasOps *o)
1933
{
1934
    tcg_gen_and_i64(o->out, o->in1, o->in2);
1935
    return NO_EXIT;
1936
}
1937

    
1938
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1939
{
1940
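    /* The insn->data field packs the immediate's bit position (low byte)
       and its width in bits (high byte) for the AND IMMEDIATE variants.  */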
    int shift = s->insn->data & 0xff;
1941
    int size = s->insn->data >> 8;
1942
    uint64_t mask = ((1ull << size) - 1) << shift;
1943

    
1944
    assert(!o->g_in2);
1945
    tcg_gen_shli_i64(o->in2, o->in2, shift);
1946
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1947
    tcg_gen_and_i64(o->out, o->in1, o->in2);
1948

    
1949
    /* Produce the CC from only the bits manipulated.  */
1950
    tcg_gen_andi_i64(cc_dst, o->out, mask);
1951
    set_cc_nz_u64(s, cc_dst);
1952
    return NO_EXIT;
1953
}
1954

    
1955
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1956
{
1957
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1958
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
1959
        tcg_gen_mov_i64(psw_addr, o->in2);
1960
        return EXIT_PC_UPDATED;
1961
    } else {
1962
        return NO_EXIT;
1963
    }
1964
}
1965

    
1966
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1967
{
1968
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1969
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1970
}
1971

    
1972
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1973
{
1974
    int m1 = get_field(s->fields, m1);
1975
    bool is_imm = have_field(s->fields, i2);
1976
    int imm = is_imm ? get_field(s->fields, i2) : 0;
1977
    DisasCompare c;
1978

    
1979
    disas_jcc(s, &c, m1);
1980
    return help_branch(s, &c, is_imm, imm, o->in2);
1981
}
1982

    
1983
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1984
{
1985
    int r1 = get_field(s->fields, r1);
1986
    bool is_imm = have_field(s->fields, i2);
1987
    int imm = is_imm ? get_field(s->fields, i2) : 0;
1988
    DisasCompare c;
1989
    TCGv_i64 t;
1990

    
1991
    c.cond = TCG_COND_NE;
1992
    c.is_64 = false;
1993
    c.g1 = false;
1994
    c.g2 = false;
1995

    
1996
    t = tcg_temp_new_i64();
1997
    tcg_gen_subi_i64(t, regs[r1], 1);
1998
    store_reg32_i64(r1, t);
1999
    c.u.s32.a = tcg_temp_new_i32();
2000
    c.u.s32.b = tcg_const_i32(0);
2001
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
2002
    tcg_temp_free_i64(t);
2003

    
2004
    return help_branch(s, &c, is_imm, imm, o->in2);
2005
}
2006

    
2007
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
2008
{
2009
    int r1 = get_field(s->fields, r1);
2010
    bool is_imm = have_field(s->fields, i2);
2011
    int imm = is_imm ? get_field(s->fields, i2) : 0;
2012
    DisasCompare c;
2013

    
2014
    c.cond = TCG_COND_NE;
2015
    c.is_64 = true;
2016
    c.g1 = true;
2017
    c.g2 = false;
2018

    
2019
    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
2020
    c.u.s64.a = regs[r1];
2021
    c.u.s64.b = tcg_const_i64(0);
2022

    
2023
    return help_branch(s, &c, is_imm, imm, o->in2);
2024
}
2025

    
2026
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
2027
{
2028
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
2029
    set_cc_static(s);
2030
    return NO_EXIT;
2031
}
2032

    
2033
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
2034
{
2035
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
2036
    set_cc_static(s);
2037
    return NO_EXIT;
2038
}
2039

    
2040
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
2041
{
2042
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2043
    set_cc_static(s);
2044
    return NO_EXIT;
2045
}
2046

    
2047
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
2048
{
2049
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2050
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
2051
    tcg_temp_free_i32(m3);
2052
    gen_set_cc_nz_f32(s, o->in2);
2053
    return NO_EXIT;
2054
}
2055

    
2056
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
2057
{
2058
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2059
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
2060
    tcg_temp_free_i32(m3);
2061
    gen_set_cc_nz_f64(s, o->in2);
2062
    return NO_EXIT;
2063
}
2064

    
2065
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
2066
{
2067
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2068
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
2069
    tcg_temp_free_i32(m3);
2070
    gen_set_cc_nz_f128(s, o->in1, o->in2);
2071
    return NO_EXIT;
2072
}
2073

    
2074
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
2075
{
2076
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2077
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
2078
    tcg_temp_free_i32(m3);
2079
    gen_set_cc_nz_f32(s, o->in2);
2080
    return NO_EXIT;
2081
}
2082

    
2083
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
2084
{
2085
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2086
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
2087
    tcg_temp_free_i32(m3);
2088
    gen_set_cc_nz_f64(s, o->in2);
2089
    return NO_EXIT;
2090
}
2091

    
2092
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
2093
{
2094
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2095
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
2096
    tcg_temp_free_i32(m3);
2097
    gen_set_cc_nz_f128(s, o->in1, o->in2);
2098
    return NO_EXIT;
2099
}
2100

    
2101
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
2102
{
2103
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2104
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
2105
    tcg_temp_free_i32(m3);
2106
    return NO_EXIT;
2107
}
2108

    
2109
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
2110
{
2111
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2112
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
2113
    tcg_temp_free_i32(m3);
2114
    return NO_EXIT;
2115
}
2116

    
2117
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
2118
{
2119
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2120
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
2121
    tcg_temp_free_i32(m3);
2122
    return_low128(o->out2);
2123
    return NO_EXIT;
2124
}
2125

    
2126
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
2127
{
2128
    int l = get_field(s->fields, l1);
2129
    TCGv_i32 vl;
2130

    
2131
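    /* The L field encodes the operand length minus one, hence l + 1.  */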
    switch (l + 1) {
2132
    case 1:
2133
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
2134
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
2135
        break;
2136
    case 2:
2137
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
2138
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
2139
        break;
2140
    case 4:
2141
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
2142
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
2143
        break;
2144
    case 8:
2145
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
2146
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
2147
        break;
2148
    default:
2149
        potential_page_fault(s);
2150
        vl = tcg_const_i32(l);
2151
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2152
        tcg_temp_free_i32(vl);
2153
        set_cc_static(s);
2154
        return NO_EXIT;
2155
    }
2156
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2157
    return NO_EXIT;
2158
}
2159

    
2160
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
2161
{
2162
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2163
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2164
    potential_page_fault(s);
2165
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
2166
    tcg_temp_free_i32(r1);
2167
    tcg_temp_free_i32(r3);
2168
    set_cc_static(s);
2169
    return NO_EXIT;
2170
}
2171

    
2172
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
2173
{
2174
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2175
    TCGv_i32 t1 = tcg_temp_new_i32();
2176
    tcg_gen_trunc_i64_i32(t1, o->in1);
2177
    potential_page_fault(s);
2178
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2179
    set_cc_static(s);
2180
    tcg_temp_free_i32(t1);
2181
    tcg_temp_free_i32(m3);
2182
    return NO_EXIT;
2183
}
2184

    
2185
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
2186
{
2187
    int r3 = get_field(s->fields, r3);
2188
    potential_page_fault(s);
2189
    gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
2190
    set_cc_static(s);
2191
    return NO_EXIT;
2192
}
2193

    
2194
static ExitStatus op_csg(DisasContext *s, DisasOps *o)
2195
{
2196
    int r3 = get_field(s->fields, r3);
2197
    potential_page_fault(s);
2198
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
2199
    set_cc_static(s);
2200
    return NO_EXIT;
2201
}
2202

    
2203
static ExitStatus op_cds(DisasContext *s, DisasOps *o)
2204
{
2205
    int r3 = get_field(s->fields, r3);
2206
    TCGv_i64 in3 = tcg_temp_new_i64();
2207
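    /* Pack r3 (high word) and r3 + 1 (low word) into one 64-bit value so
       that the 64-bit compare-and-swap helper can be reused.  */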
    tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
2208
    potential_page_fault(s);
2209
    gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
2210
    tcg_temp_free_i64(in3);
2211
    set_cc_static(s);
2212
    return NO_EXIT;
2213
}
2214

    
2215
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
2216
{
2217
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2218
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2219
    potential_page_fault(s);
2220
    /* XXX rewrite in tcg */
2221
    gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
2222
    set_cc_static(s);
2223
    return NO_EXIT;
2224
}
2225

    
2226
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2227
{
2228
    TCGv_i64 t1 = tcg_temp_new_i64();
2229
    TCGv_i32 t2 = tcg_temp_new_i32();
2230
    tcg_gen_trunc_i64_i32(t2, o->in1);
2231
    gen_helper_cvd(t1, t2);
2232
    tcg_temp_free_i32(t2);
2233
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2234
    tcg_temp_free_i64(t1);
2235
    return NO_EXIT;
2236
}
2237

    
2238
#ifndef CONFIG_USER_ONLY
2239
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2240
{
2241
    TCGv_i32 tmp;
2242

    
2243
    check_privileged(s);
2244
    potential_page_fault(s);
2245

    
2246
    /* We pretend the format is RX_a so that D2 is the field we want.  */
2247
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
2248
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
2249
    tcg_temp_free_i32(tmp);
2250
    return NO_EXIT;
2251
}
2252
#endif
2253

    
2254
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2255
{
2256
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2257
    return_low128(o->out);
2258
    return NO_EXIT;
2259
}
2260

    
2261
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2262
{
2263
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2264
    return_low128(o->out);
2265
    return NO_EXIT;
2266
}
2267

    
2268
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2269
{
2270
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2271
    return_low128(o->out);
2272
    return NO_EXIT;
2273
}
2274

    
2275
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2276
{
2277
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2278
    return_low128(o->out);
2279
    return NO_EXIT;
2280
}
2281

    
2282
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2283
{
2284
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2285
    return NO_EXIT;
2286
}
2287

    
2288
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2289
{
2290
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2291
    return NO_EXIT;
2292
}
2293

    
2294
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2295
{
2296
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2297
    return_low128(o->out2);
2298
    return NO_EXIT;
2299
}
2300

    
2301
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2302
{
2303
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2304
    return NO_EXIT;
2305
}
2306

    
2307
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2308
{
2309
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2310
       tb->flags, (ab)use the tb->cs_base field as the address of
2311
       the template in memory, and grab 8 bits of tb->flags/cflags for
2312
       the contents of the register.  We would then recognize all this
2313
       in gen_intermediate_code_internal, generating code for exactly
2314
       one instruction.  This new TB then gets executed normally.
2315

2316
       On the other hand, this seems to be mostly used for modifying
2317
       MVC inside of memcpy, which needs a helper call anyway.  So
2318
       perhaps this doesn't bear thinking about any further.  */
2319

    
2320
    TCGv_i64 tmp;
2321

    
2322
    update_psw_addr(s);
2323
    gen_op_calc_cc(s);
2324

    
2325
    tmp = tcg_const_i64(s->next_pc);
2326
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2327
    tcg_temp_free_i64(tmp);
2328

    
2329
    set_cc_static(s);
2330
    return NO_EXIT;
2331
}
2332

    
2333
static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2334
{
2335
    int m3 = get_field(s->fields, m3);
2336
    int pos, len, base = s->insn->data;
2337
    TCGv_i64 tmp = tcg_temp_new_i64();
2338
    uint64_t ccm;
2339

    
2340
    switch (m3) {
2341
    case 0xf:
2342
        /* Effectively a 32-bit load.  */
2343
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2344
        len = 32;
2345
        goto one_insert;
2346

    
2347
    case 0xc:
2348
    case 0x6:
2349
    case 0x3:
2350
        /* Effectively a 16-bit load.  */
2351
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2352
        len = 16;
2353
        goto one_insert;
2354

    
2355
    case 0x8:
2356
    case 0x4:
2357
    case 0x2:
2358
    case 0x1:
2359
        /* Effectively an 8-bit load.  */
2360
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2361
        len = 8;
2362
        goto one_insert;
2363

    
2364
    one_insert:
2365
        pos = base + ctz32(m3) * 8;
2366
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2367
        ccm = ((1ull << len) - 1) << pos;
2368
        break;
2369

    
2370
    default:
2371
        /* This is going to be a sequence of loads and inserts.  */
2372
        pos = base + 32 - 8;
2373
        ccm = 0;
2374
        while (m3) {
2375
            if (m3 & 0x8) {
2376
                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2377
                tcg_gen_addi_i64(o->in2, o->in2, 1);
2378
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2379
                ccm |= 0xffull << pos;
2380
            }
2381
            m3 = (m3 << 1) & 0xf;
2382
            pos -= 8;
2383
        }
2384
        break;
2385
    }
2386

    
2387
    tcg_gen_movi_i64(tmp, ccm);
2388
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2389
    tcg_temp_free_i64(tmp);
2390
    return NO_EXIT;
2391
}
2392

    
2393
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2394
{
2395
    int shift = s->insn->data & 0xff;
2396
    int size = s->insn->data >> 8;
2397
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2398
    return NO_EXIT;
2399
}
2400

    
2401
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2402
{
2403
    gen_helper_ldeb(o->out, cpu_env, o->in2);
2404
    return NO_EXIT;
2405
}
2406

    
2407
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2408
{
2409
    gen_helper_ledb(o->out, cpu_env, o->in2);
2410
    return NO_EXIT;
2411
}
2412

    
2413
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2414
{
2415
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2416
    return NO_EXIT;
2417
}
2418

    
2419
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2420
{
2421
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2422
    return NO_EXIT;
2423
}
2424

    
2425
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2426
{
2427
    gen_helper_lxdb(o->out, cpu_env, o->in2);
2428
    return_low128(o->out2);
2429
    return NO_EXIT;
2430
}
2431

    
2432
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2433
{
2434
    gen_helper_lxeb(o->out, cpu_env, o->in2);
2435
    return_low128(o->out2);
2436
    return NO_EXIT;
2437
}
2438

    
2439
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2440
{
2441
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2442
    return NO_EXIT;
2443
}
2444

    
2445
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2446
{
2447
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2448
    return NO_EXIT;
2449
}
2450

    
2451
static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2452
{
2453
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2454
    return NO_EXIT;
2455
}
2456

    
2457
static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2458
{
2459
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2460
    return NO_EXIT;
2461
}
2462

    
2463
static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2464
{
2465
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2466
    return NO_EXIT;
2467
}
2468

    
2469
static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2470
{
2471
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2472
    return NO_EXIT;
2473
}
2474

    
2475
static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2476
{
2477
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2478
    return NO_EXIT;
2479
}
2480

    
2481
static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2482
{
2483
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2484
    return NO_EXIT;
2485
}
2486

    
2487
#ifndef CONFIG_USER_ONLY
2488
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2489
{
2490
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2491
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2492
    check_privileged(s);
2493
    potential_page_fault(s);
2494
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
2495
    tcg_temp_free_i32(r1);
2496
    tcg_temp_free_i32(r3);
2497
    return NO_EXIT;
2498
}
2499

    
2500
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2501
{
2502
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2503
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2504
    check_privileged(s);
2505
    potential_page_fault(s);
2506
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2507
    tcg_temp_free_i32(r1);
2508
    tcg_temp_free_i32(r3);
2509
    return NO_EXIT;
2510
}
2511
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2512
{
2513
    check_privileged(s);
2514
    potential_page_fault(s);
2515
    gen_helper_lra(o->out, cpu_env, o->in2);
2516
    set_cc_static(s);
2517
    return NO_EXIT;
2518
}
2519

    
2520
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2521
{
2522
    TCGv_i64 t1, t2;
2523

    
2524
    check_privileged(s);
2525

    
2526
    t1 = tcg_temp_new_i64();
2527
    t2 = tcg_temp_new_i64();
2528
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2529
    tcg_gen_addi_i64(o->in2, o->in2, 4);
2530
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2531
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
2532
    tcg_gen_shli_i64(t1, t1, 32);
2533
    gen_helper_load_psw(cpu_env, t1, t2);
2534
    tcg_temp_free_i64(t1);
2535
    tcg_temp_free_i64(t2);
2536
    return EXIT_NORETURN;
2537
}
2538
#endif
2539

    
2540
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2541
{
2542
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2543
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2544
    potential_page_fault(s);
2545
    gen_helper_lam(cpu_env, r1, o->in2, r3);
2546
    tcg_temp_free_i32(r1);
2547
    tcg_temp_free_i32(r3);
2548
    return NO_EXIT;
2549
}
2550

    
2551
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2552
{
2553
    int r1 = get_field(s->fields, r1);
2554
    int r3 = get_field(s->fields, r3);
2555
    TCGv_i64 t = tcg_temp_new_i64();
2556
    TCGv_i64 t4 = tcg_const_i64(4);
2557

    
2558
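    /* r1 and r3 are instruction fields, so this loop is fully unrolled at
       translation time; the register number wraps modulo 16.  */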
    while (1) {
2559
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2560
        store_reg32_i64(r1, t);
2561
        if (r1 == r3) {
2562
            break;
2563
        }
2564
        tcg_gen_add_i64(o->in2, o->in2, t4);
2565
        r1 = (r1 + 1) & 15;
2566
    }
2567

    
2568
    tcg_temp_free_i64(t);
2569
    tcg_temp_free_i64(t4);
2570
    return NO_EXIT;
2571
}
2572

    
2573
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2574
{
2575
    int r1 = get_field(s->fields, r1);
2576
    int r3 = get_field(s->fields, r3);
2577
    TCGv_i64 t = tcg_temp_new_i64();
2578
    TCGv_i64 t4 = tcg_const_i64(4);
2579

    
2580
    while (1) {
2581
        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2582
        store_reg32h_i64(r1, t);
2583
        if (r1 == r3) {
2584
            break;
2585
        }
2586
        tcg_gen_add_i64(o->in2, o->in2, t4);
2587
        r1 = (r1 + 1) & 15;
2588
    }
2589

    
2590
    tcg_temp_free_i64(t);
2591
    tcg_temp_free_i64(t4);
2592
    return NO_EXIT;
2593
}
2594

    
2595
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2596
{
2597
    int r1 = get_field(s->fields, r1);
2598
    int r3 = get_field(s->fields, r3);
2599
    TCGv_i64 t8 = tcg_const_i64(8);
2600

    
2601
    while (1) {
2602
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2603
        if (r1 == r3) {
2604
            break;
2605
        }
2606
        tcg_gen_add_i64(o->in2, o->in2, t8);
2607
        r1 = (r1 + 1) & 15;
2608
    }
2609

    
2610
    tcg_temp_free_i64(t8);
2611
    return NO_EXIT;
2612
}
2613

    
2614
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2615
{
2616
    o->out = o->in2;
2617
    o->g_out = o->g_in2;
2618
    TCGV_UNUSED_I64(o->in2);
2619
    o->g_in2 = false;
2620
    return NO_EXIT;
2621
}
2622

    
2623
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2624
{
2625
    o->out = o->in1;
2626
    o->out2 = o->in2;
2627
    o->g_out = o->g_in1;
2628
    o->g_out2 = o->g_in2;
2629
    TCGV_UNUSED_I64(o->in1);
2630
    TCGV_UNUSED_I64(o->in2);
2631
    o->g_in1 = o->g_in2 = false;
2632
    return NO_EXIT;
2633
}
2634

    
2635
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2636
{
2637
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2638
    potential_page_fault(s);
2639
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2640
    tcg_temp_free_i32(l);
2641
    return NO_EXIT;
2642
}
2643

    
2644
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2645
{
2646
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2647
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2648
    potential_page_fault(s);
2649
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2650
    tcg_temp_free_i32(r1);
2651
    tcg_temp_free_i32(r2);
2652
    set_cc_static(s);
2653
    return NO_EXIT;
2654
}
2655

    
2656
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2657
{
2658
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2659
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2660
    potential_page_fault(s);
2661
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2662
    tcg_temp_free_i32(r1);
2663
    tcg_temp_free_i32(r3);
2664
    set_cc_static(s);
2665
    return NO_EXIT;
2666
}
2667

    
2668
#ifndef CONFIG_USER_ONLY
2669
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2670
{
2671
    int r1 = get_field(s->fields, l1);
2672
    check_privileged(s);
2673
    potential_page_fault(s);
2674
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2675
    set_cc_static(s);
2676
    return NO_EXIT;
2677
}
2678

    
2679
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2680
{
2681
    int r1 = get_field(s->fields, l1);
2682
    check_privileged(s);
2683
    potential_page_fault(s);
2684
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2685
    set_cc_static(s);
2686
    return NO_EXIT;
2687
}
2688
#endif
2689

    
2690
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2691
{
2692
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
2693
    return NO_EXIT;
2694
}
2695

    
2696
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2697
{
2698
    gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
2699
    return_low128(o->out2);
2700
    return NO_EXIT;
2701
}
2702

    
2703
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2704
{
2705
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2706
    return NO_EXIT;
2707
}
2708

    
2709
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2710
{
2711
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2712
    return NO_EXIT;
2713
}
2714

    
2715
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2716
{
2717
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2718
    return NO_EXIT;
2719
}
2720

    
2721
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2722
{
2723
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2724
    return_low128(o->out2);
2725
    return NO_EXIT;
2726
}
2727

    
2728
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2729
{
2730
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2731
    return_low128(o->out2);
2732
    return NO_EXIT;
2733
}
2734

    
2735
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2736
{
2737
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2738
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2739
    tcg_temp_free_i64(r3);
2740
    return NO_EXIT;
2741
}
2742

    
2743
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2744
{
2745
    int r3 = get_field(s->fields, r3);
2746
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2747
    return NO_EXIT;
2748
}
2749

    
2750
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2751
{
2752
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2753
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2754
    tcg_temp_free_i64(r3);
2755
    return NO_EXIT;
2756
}
2757

    
2758
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2759
{
2760
    int r3 = get_field(s->fields, r3);
2761
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2762
    return NO_EXIT;
2763
}
2764

    
2765
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2766
{
2767
    gen_helper_nabs_i64(o->out, o->in2);
2768
    return NO_EXIT;
2769
}
2770

    
2771
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2772
{
2773
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2774
    return NO_EXIT;
2775
}
2776

    
2777
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2778
{
2779
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2780
    return NO_EXIT;
2781
}
2782

    
2783
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2784
{
2785
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2786
    tcg_gen_mov_i64(o->out2, o->in2);
2787
    return NO_EXIT;
2788
}
2789

    
2790
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2791
{
2792
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2793
    potential_page_fault(s);
2794
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2795
    tcg_temp_free_i32(l);
2796
    set_cc_static(s);
2797
    return NO_EXIT;
2798
}
2799

    
2800
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2801
{
2802
    tcg_gen_neg_i64(o->out, o->in2);
2803
    return NO_EXIT;
2804
}
2805

    
2806
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2807
{
2808
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2809
    return NO_EXIT;
2810
}
2811

    
2812
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2813
{
2814
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
2815
    return NO_EXIT;
2816
}
2817

    
2818
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2819
{
2820
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2821
    tcg_gen_mov_i64(o->out2, o->in2);
2822
    return NO_EXIT;
2823
}
2824

    
2825
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2826
{
2827
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2828
    potential_page_fault(s);
2829
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2830
    tcg_temp_free_i32(l);
2831
    set_cc_static(s);
2832
    return NO_EXIT;
2833
}
2834

    
2835
static ExitStatus op_or(DisasContext *s, DisasOps *o)
2836
{
2837
    tcg_gen_or_i64(o->out, o->in1, o->in2);
2838
    return NO_EXIT;
2839
}
2840

    
2841
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2842
{
2843
    int shift = s->insn->data & 0xff;
2844
    int size = s->insn->data >> 8;
2845
    uint64_t mask = ((1ull << size) - 1) << shift;
2846

    
2847
    assert(!o->g_in2);
2848
    tcg_gen_shli_i64(o->in2, o->in2, shift);
2849
    tcg_gen_or_i64(o->out, o->in1, o->in2);
2850

    
2851
    /* Produce the CC from only the bits manipulated.  */
2852
    tcg_gen_andi_i64(cc_dst, o->out, mask);
2853
    set_cc_nz_u64(s, cc_dst);
2854
    return NO_EXIT;
2855
}
2856

    
2857
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2858
{
2859
    tcg_gen_bswap16_i64(o->out, o->in2);
2860
    return NO_EXIT;
2861
}
2862

    
2863
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2864
{
2865
    tcg_gen_bswap32_i64(o->out, o->in2);
2866
    return NO_EXIT;
2867
}
2868

    
2869
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2870
{
2871
    tcg_gen_bswap64_i64(o->out, o->in2);
2872
    return NO_EXIT;
2873
}
2874

    
2875
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2876
{
2877
    TCGv_i32 t1 = tcg_temp_new_i32();
2878
    TCGv_i32 t2 = tcg_temp_new_i32();
2879
    TCGv_i32 to = tcg_temp_new_i32();
2880
    tcg_gen_trunc_i64_i32(t1, o->in1);
2881
    tcg_gen_trunc_i64_i32(t2, o->in2);
2882
    tcg_gen_rotl_i32(to, t1, t2);
2883
    tcg_gen_extu_i32_i64(o->out, to);
2884
    tcg_temp_free_i32(t1);
2885
    tcg_temp_free_i32(t2);
2886
    tcg_temp_free_i32(to);
2887
    return NO_EXIT;
2888
}
2889

    
2890
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2891
{
2892
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
2893
    return NO_EXIT;
2894
}
2895

    
2896
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
2897
{
2898
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
2899
    return NO_EXIT;
2900
}
2901

    
2902
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
2903
{
2904
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
2905
    return NO_EXIT;
2906
}
2907

    
2908
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
2909
{
2910
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2911
    return_low128(o->out2);
2912
    return NO_EXIT;
2913
}
2914

    
2915
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
2916
{
2917
    gen_helper_sqeb(o->out, cpu_env, o->in2);
2918
    return NO_EXIT;
2919
}
2920

    
2921
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
2922
{
2923
    gen_helper_sqdb(o->out, cpu_env, o->in2);
2924
    return NO_EXIT;
2925
}
2926

    
2927
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
2928
{
2929
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
2930
    return_low128(o->out2);
2931
    return NO_EXIT;
2932
}
2933

    
2934
#ifndef CONFIG_USER_ONLY
2935
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
2936
{
2937
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2938
    check_privileged(s);
2939
    potential_page_fault(s);
2940
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
2941
    tcg_temp_free_i32(r1);
2942
    return NO_EXIT;
2943
}
2944
#endif
2945

    
2946
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
2947
{
2948
    uint64_t sign = 1ull << s->insn->data;
2949
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
2950
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
2951
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
2952
    /* The arithmetic left shift is curious in that it does not affect
2953
       the sign bit.  Copy that over from the source unchanged.  */
2954
    tcg_gen_andi_i64(o->out, o->out, ~sign);
2955
    tcg_gen_andi_i64(o->in1, o->in1, sign);
2956
    tcg_gen_or_i64(o->out, o->out, o->in1);
2957
    return NO_EXIT;
2958
}
2959

    
2960
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
2961
{
2962
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
2963
    return NO_EXIT;
2964
}
2965

    
2966
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
2967
{
2968
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
2969
    return NO_EXIT;
2970
}
2971

    
2972
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
2973
{
2974
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
2975
    return NO_EXIT;
2976
}
2977

    
2978
#ifndef CONFIG_USER_ONLY
2979
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
2980
{
2981
    check_privileged(s);
2982
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
2983
    return NO_EXIT;
2984
}
2985

    
2986
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
2987
{
2988
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2989
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2990
    check_privileged(s);
2991
    potential_page_fault(s);
2992
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
2993
    tcg_temp_free_i32(r1);
2994
    tcg_temp_free_i32(r3);
2995
    return NO_EXIT;
2996
}
2997

    
2998
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
2999
{
3000
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3001
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3002
    check_privileged(s);
3003
    potential_page_fault(s);
3004
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
3005
    tcg_temp_free_i32(r1);
3006
    tcg_temp_free_i32(r3);
3007
    return NO_EXIT;
3008
}
3009

    
3010
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3011
{
3012
    uint64_t i2 = get_field(s->fields, i2);
3013
    TCGv_i64 t;
3014

    
3015
    check_privileged(s);
3016

    
3017
    /* It is important to do what the instruction name says: STORE THEN.
3018
       If we let the output hook perform the store then if we fault and
3019
       restart, we'll have the wrong SYSTEM MASK in place.  */
3020
    t = tcg_temp_new_i64();
3021
    tcg_gen_shri_i64(t, psw_mask, 56);
3022
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3023
    tcg_temp_free_i64(t);
3024

    
3025
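    /* Opcode 0xac is STNSM (AND the system mask); 0xad is STOSM (OR).  */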
    if (s->fields->op == 0xac) {
3026
        tcg_gen_andi_i64(psw_mask, psw_mask,
3027
                         (i2 << 56) | 0x00ffffffffffffffull);
3028
    } else {
3029
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3030
    }
3031
    return NO_EXIT;
3032
}
3033
#endif
3034

    
3035
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3036
{
3037
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3038
    return NO_EXIT;
3039
}
3040

    
3041
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3042
{
3043
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3044
    return NO_EXIT;
3045
}
3046

    
3047
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3048
{
3049
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3050
    return NO_EXIT;
3051
}
3052

    
3053
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3054
{
3055
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3056
    return NO_EXIT;
3057
}
3058

    
3059
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3060
{
3061
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3062
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3063
    potential_page_fault(s);
3064
    gen_helper_stam(cpu_env, r1, o->in2, r3);
3065
    tcg_temp_free_i32(r1);
3066
    tcg_temp_free_i32(r3);
3067
    return NO_EXIT;
3068
}
3069

    
3070
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3071
{
3072
    int m3 = get_field(s->fields, m3);
3073
    int pos, base = s->insn->data;
3074
    TCGv_i64 tmp = tcg_temp_new_i64();
3075

    
3076
    pos = base + ctz32(m3) * 8;
3077
    switch (m3) {
3078
    case 0xf:
3079
        /* Effectively a 32-bit store.  */
3080
        tcg_gen_shri_i64(tmp, o->in1, pos);
3081
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3082
        break;
3083

    
3084
    case 0xc:
3085
    case 0x6:
3086
    case 0x3:
3087
        /* Effectively a 16-bit store.  */
3088
        tcg_gen_shri_i64(tmp, o->in1, pos);
3089
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3090
        break;
3091

    
3092
    case 0x8:
3093
    case 0x4:
3094
    case 0x2:
3095
    case 0x1:
3096
        /* Effectively an 8-bit store.  */
3097
        tcg_gen_shri_i64(tmp, o->in1, pos);
3098
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3099
        break;
3100

    
3101
    default:
3102
        /* This is going to be a sequence of shifts and stores.  */
3103
        pos = base + 32 - 8;
3104
        while (m3) {
3105
            if (m3 & 0x8) {
3106
                tcg_gen_shri_i64(tmp, o->in1, pos);
3107
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3108
                tcg_gen_addi_i64(o->in2, o->in2, 1);
3109
            }
3110
            m3 = (m3 << 1) & 0xf;
3111
            pos -= 8;
3112
        }
3113
        break;
3114
    }
3115
    tcg_temp_free_i64(tmp);
3116
    return NO_EXIT;
3117
}
3118

    
3119
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3120
{
3121
    int r1 = get_field(s->fields, r1);
3122
    int r3 = get_field(s->fields, r3);
3123
    int size = s->insn->data;
3124
    TCGv_i64 tsize = tcg_const_i64(size);
3125

    
3126
    while (1) {
3127
        if (size == 8) {
3128
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3129
        } else {
3130
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3131
        }
3132
        if (r1 == r3) {
3133
            break;
3134
        }
3135
        tcg_gen_add_i64(o->in2, o->in2, tsize);
3136
        r1 = (r1 + 1) & 15;
3137
    }
3138

    
3139
    tcg_temp_free_i64(tsize);
3140
    return NO_EXIT;
3141
}
3142

    
3143
static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3144
{
3145
    int r1 = get_field(s->fields, r1);
3146
    int r3 = get_field(s->fields, r3);
3147
    TCGv_i64 t = tcg_temp_new_i64();
3148
    TCGv_i64 t4 = tcg_const_i64(4);
3149
    TCGv_i64 t32 = tcg_const_i64(32);
3150

    
3151
    while (1) {
3152
        /* STORE MULTIPLE HIGH stores the high-order word of each register.  */
        tcg_gen_shr_i64(t, regs[r1], t32);
3153
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3154
        if (r1 == r3) {
3155
            break;
3156
        }
3157
        tcg_gen_add_i64(o->in2, o->in2, t4);
3158
        r1 = (r1 + 1) & 15;
3159
    }
3160

    
3161
    tcg_temp_free_i64(t);
3162
    tcg_temp_free_i64(t4);
3163
    tcg_temp_free_i64(t32);
3164
    return NO_EXIT;
3165
}
3166

    
3167
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3168
{
3169
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
3170
    return NO_EXIT;
3171
}
3172

    
3173
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3174
{
3175
    TCGv_i64 cc;
3176

    
3177
    assert(!o->g_in2);
3178
    tcg_gen_not_i64(o->in2, o->in2);
3179
    tcg_gen_add_i64(o->out, o->in1, o->in2);
3180

    
3181
    /* XXX possible optimization point */
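    /* Bit 1 of the previous condition code is the "no borrow" (carry) flag,
       which completes the in1 + ~in2 + carry computation.  */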
3182
    gen_op_calc_cc(s);
3183
    cc = tcg_temp_new_i64();
3184
    tcg_gen_extu_i32_i64(cc, cc_op);
3185
    tcg_gen_shri_i64(cc, cc, 1);
3186
    tcg_gen_add_i64(o->out, o->out, cc);
3187
    tcg_temp_free_i64(cc);
3188
    return NO_EXIT;
3189
}
3190

    
3191
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3192
{
3193
    TCGv_i32 t;
3194

    
3195
    update_psw_addr(s);
3196
    gen_op_calc_cc(s);
3197

    
3198
    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3199
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3200
    tcg_temp_free_i32(t);
3201

    
3202
    t = tcg_const_i32(s->next_pc - s->pc);
3203
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3204
    tcg_temp_free_i32(t);
3205

    
3206
    gen_exception(EXCP_SVC);
3207
    return EXIT_NORETURN;
3208
}
3209

    
3210
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3211
{
3212
    gen_helper_tceb(cc_op, o->in1, o->in2);
3213
    set_cc_static(s);
3214
    return NO_EXIT;
3215
}
3216

    
3217
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3218
{
3219
    gen_helper_tcdb(cc_op, o->in1, o->in2);
3220
    set_cc_static(s);
3221
    return NO_EXIT;
3222
}
3223

    
3224
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3225
{
3226
    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3227
    set_cc_static(s);
3228
    return NO_EXIT;
3229
}
3230

    
3231
#ifndef CONFIG_USER_ONLY
3232
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3233
{
3234
    potential_page_fault(s);
3235
    gen_helper_tprot(cc_op, o->addr1, o->in2);
3236
    set_cc_static(s);
3237
    return NO_EXIT;
3238
}
3239
#endif
3240

    
3241
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3242
{
3243
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3244
    potential_page_fault(s);
3245
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3246
    tcg_temp_free_i32(l);
3247
    set_cc_static(s);
3248
    return NO_EXIT;
3249
}
3250

    
3251
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3252
{
3253
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3254
    potential_page_fault(s);
3255
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3256
    tcg_temp_free_i32(l);
3257
    return NO_EXIT;
3258
}
3259

    
3260
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3261
{
3262
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3263
    potential_page_fault(s);
3264
    gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
3265
    tcg_temp_free_i32(l);
3266
    set_cc_static(s);
3267
    return NO_EXIT;
3268
}
3269

    
3270
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3271
{
3272
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
3273
    return NO_EXIT;
3274
}
3275

    
3276
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3277
{
3278
    int shift = s->insn->data & 0xff;
3279
    int size = s->insn->data >> 8;
3280
    uint64_t mask = ((1ull << size) - 1) << shift;
3281

    
3282
    assert(!o->g_in2);
3283
    tcg_gen_shli_i64(o->in2, o->in2, shift);
3284
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
3285

    
3286
    /* Produce the CC from only the bits manipulated.  */
3287
    tcg_gen_andi_i64(cc_dst, o->out, mask);
3288
    set_cc_nz_u64(s, cc_dst);
3289
    return NO_EXIT;
3290
}
3291

    
3292
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
3293
{
3294
    o->out = tcg_const_i64(0);
3295
    return NO_EXIT;
3296
}
3297

    
3298
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
3299
{
3300
    o->out = tcg_const_i64(0);
3301
    o->out2 = o->out;
3302
    o->g_out2 = true;
3303
    return NO_EXIT;
3304
}
3305

    
3306
/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
3312
{
3313
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
3314
}
3315

    
3316
static void cout_abs64(DisasContext *s, DisasOps *o)
3317
{
3318
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
3319
}
3320

    
3321
static void cout_adds32(DisasContext *s, DisasOps *o)
3322
{
3323
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
3324
}
3325

    
3326
static void cout_adds64(DisasContext *s, DisasOps *o)
3327
{
3328
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
3329
}
3330

    
3331
static void cout_addu32(DisasContext *s, DisasOps *o)
3332
{
3333
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
3334
}
3335

    
3336
static void cout_addu64(DisasContext *s, DisasOps *o)
3337
{
3338
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
3339
}
3340

    
3341
static void cout_addc32(DisasContext *s, DisasOps *o)
3342
{
3343
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
3344
}
3345

    
3346
static void cout_addc64(DisasContext *s, DisasOps *o)
3347
{
3348
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
3349
}
3350

    
3351
static void cout_cmps32(DisasContext *s, DisasOps *o)
3352
{
3353
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
3354
}
3355

    
3356
static void cout_cmps64(DisasContext *s, DisasOps *o)
3357
{
3358
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
3359
}
3360

    
3361
static void cout_cmpu32(DisasContext *s, DisasOps *o)
3362
{
3363
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
3364
}
3365

    
3366
static void cout_cmpu64(DisasContext *s, DisasOps *o)
3367
{
3368
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
3369
}
3370

    
3371
static void cout_f32(DisasContext *s, DisasOps *o)
3372
{
3373
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
3374
}
3375

    
3376
static void cout_f64(DisasContext *s, DisasOps *o)
3377
{
3378
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
3379
}
3380

    
3381
static void cout_f128(DisasContext *s, DisasOps *o)
3382
{
3383
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
3384
}
3385

    
3386
static void cout_nabs32(DisasContext *s, DisasOps *o)
3387
{
3388
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
3389
}
3390

    
3391
static void cout_nabs64(DisasContext *s, DisasOps *o)
3392
{
3393
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
3394
}
3395

    
3396
static void cout_neg32(DisasContext *s, DisasOps *o)
3397
{
3398
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
3399
}
3400

    
3401
static void cout_neg64(DisasContext *s, DisasOps *o)
3402
{
3403
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
3404
}
3405

    
3406
static void cout_nz32(DisasContext *s, DisasOps *o)
3407
{
3408
    tcg_gen_ext32u_i64(cc_dst, o->out);
3409
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
3410
}
3411

    
3412
static void cout_nz64(DisasContext *s, DisasOps *o)
3413
{
3414
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
3415
}
3416

    
3417
static void cout_s32(DisasContext *s, DisasOps *o)
3418
{
3419
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3420
}
3421

    
3422
static void cout_s64(DisasContext *s, DisasOps *o)
3423
{
3424
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
3425
}
3426

    
3427
static void cout_subs32(DisasContext *s, DisasOps *o)
3428
{
3429
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3430
}
3431

    
3432
static void cout_subs64(DisasContext *s, DisasOps *o)
3433
{
3434
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3435
}
3436

    
3437
static void cout_subu32(DisasContext *s, DisasOps *o)
3438
{
3439
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3440
}
3441

    
3442
static void cout_subu64(DisasContext *s, DisasOps *o)
3443
{
3444
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3445
}
3446

    
3447
static void cout_subb32(DisasContext *s, DisasOps *o)
3448
{
3449
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3450
}
3451

    
3452
static void cout_subb64(DisasContext *s, DisasOps *o)
3453
{
3454
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3455
}
3456

    
3457
static void cout_tm32(DisasContext *s, DisasOps *o)
3458
{
3459
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3460
}
3461

    
3462
static void cout_tm64(DisasContext *s, DisasOps *o)
3463
{
3464
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3465
}
3466

    
3467
/* ====================================================================== */
/* The "PREPeration" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */

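/* For illustration only (the actual pairings are defined by insn-data.def,
   not here): an operation that computes a fresh 32-bit result for r1 would
   typically combine prep_new (allocate a temporary for o->out) with
   wout_r1_32 (store the low 32 bits back to the register), while a full
   64-bit register result can use prep_r1 alone, which aliases o->out to the
   TCG global regs[r1] and sets g_out so that translate_one does not free it
   as a temporary.  */
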
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}

static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}

static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}

static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[(r1 + 1) & 15];
    o->g_out = o->g_out2 = true;
}

static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}

static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14.  */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}

/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */

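/* For example, wout_r1_32 below copies only the low 32 bits of o->out into
   r1, wout_r1_D32 splits a 64-bit result across an even/odd register pair,
   and the wout_m1_* helpers store the result to the effective address that
   an "la1" input generator left in o->addr1.  */
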
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}

static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}

static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}

static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64((r1 + 1) & 15, o->out2);
}

static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    store_reg32_i64((r1 + 1) & 15, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}

static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14.  */
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg((f1 + 2) & 15, o->out2);
}

static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}

static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}

/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */

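/* Naming conventions for the helpers below: a plain rN variant copies the
   register into a fresh temporary via load_reg; an "_o" suffix aliases the
   TCG global directly and sets g_in1 so the temporary-freeing code in
   translate_one leaves it alone; the 32s/32u/sr32 variants sign-extend,
   zero-extend or right-shift the register into a new temporary; la1/la2
   only compute an effective address into o->addr1; and the m1_* variants
   combine that address computation with a memory load of the given size.  */
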
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}

static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}

static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    o->in1 = load_reg((r1 + 1) & 15);
}

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
}

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
}

static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}

static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14.  */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[(r1 + 2) & 15];
    o->g_out = o->g_out2 = true;
}

static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}

static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}

/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */

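/* The same conventions apply here.  In addition, in2_a2 computes a
   base + index + displacement effective address directly into o->in2,
   in2_ri2 materializes the PC-relative value s->pc + i2 * 2 used by the
   relative-addressing insns, the mri2_* variants load from that address,
   and the i2_* variants turn the immediate field into a constant, optionally
   shifted left by the per-insn data value taken from the insn table.  */
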
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}

static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}

static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}

static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14.  */
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[(r2 + 2) & 15];
    o->g_in1 = o->g_in2 = true;
}

static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}

static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}

static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}

static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}

/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

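/* As a purely illustrative example of how one line of insn-data.def is used
   three times below: a hypothetical entry
       D(0x1234, FOO, RR_a, Z, r1, r2, new, r1_32, foo, nz32, 0)
   would contribute "insn_FOO," to DisasInsnEnum, a DisasInsn initializer in
   insn_info[] wiring up in1_r1, in2_r2, prep_new, wout_r1_32, op_foo and
   cout_nz32, and "case 0x1234: return &insn_info[insn_FOO];" inside
   lookup_opc.  */
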
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC,                           \
    .fmt = FMT_##FT,                      \
    .fac = FAC_##FC,                      \
    .name = #NM,                          \
    .help_in1 = in1_##I1,                 \
    .help_in2 = in2_##I2,                 \
    .help_prep = prep_##P,                \
    .help_wout = wout_##W,                \
    .help_cout = cout_##CC,               \
    .help_op = op_##OP,                   \
    .data = D                             \
 },

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C

/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

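/* Illustrative example (the values are not tied to any particular format):
   a 4-bit register field beginning at big-endian bit 8 is described with
   f->beg = 8 and f->size = 4, so the extraction below computes
   r = (insn << 8) >> 60, leaving exactly bits 8..11 of the left-aligned
   instruction word.  */
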
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}

/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

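    /* At this point the insn is left-aligned in the 64-bit word: a 2-byte
       insn occupies bits 0..15, a 4-byte insn bits 0..31 and a 6-byte insn
       bits 0..47 (big-endian bit numbering), with the remainder zero.
       This is the layout that the f->beg offsets in extract_field assume.  */
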
    /* We can't actually determine the insn format until we've looked up
       the full insn opcode, which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode,
       this will correctly result in OP2 = 0.  */
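    /* For instance, for the two-byte opcodes such as 0xb2xx the expression
       (insn << 8) >> 56 below isolates bits 8..15 of the left-aligned word,
       i.e. the second opcode byte; the RI/RIL group takes a 4-bit OP2 from
       bits 12..15; and the default case reads the byte at bits 40..47.  */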
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}

static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    insn = extract_insn(env, s, &f);

    /* If not found, try the old interpreter.  This includes ILLOPC.  */
    if (insn == NULL) {
        disas_s390_insn(env, s);
        switch (s->is_jmp) {
        case DISAS_NEXT:
            ret = NO_EXIT;
            break;
        case DISAS_TB_JUMP:
            ret = EXIT_GOTO_TB;
            break;
        case DISAS_JUMP:
            ret = EXIT_PC_UPDATED;
            break;
        case DISAS_EXCP:
            ret = EXIT_NORETURN;
            break;
        default:
            abort();
        }

        s->pc = s->next_pc;
        return ret;
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  */
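    /* The helper stages run in a fixed order: in1/in2 load the operands,
       prep selects the destination, op emits the actual operation (and is
       the only stage that can request a TB exit via its return value),
       wout writes the result back, and cout updates the condition code.
       Any stage left as 0 in the insn table is simply skipped.  */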
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}

static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();

    do {
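        /* When search_pc is set we are re-translating an already generated
           TB to recover guest state for a particular host PC; record the
           guest pc, cc_op and instruction count for each op index so that
           restore_state_to_opc() can map an opcode position back to
           psw.addr and env->cc_op.  */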
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        } else {
            /* Next TB starts off with CC_OP_DYNAMIC,
               so make sure the cc op type is in env */
            gen_op_set_cc_op(&dc);
        }
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            /* Generate the return instruction */
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}

void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;
    env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
    cc_op = gen_opc_cc_op[pc_pos];
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}