/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "host-utils.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"

static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init) {
        return;
    }

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
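
/* For example, LDQ_U (and STQ_U via gen_store_mem below) is translated
   with CLEAR set, so the low three address bits are zapped (andi ~0x7)
   and the access is always aligned, matching the architected behaviour
   of the unaligned-access instructions.  */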

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
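
/* Guests use LDx_L/STx_C in a retry loop, e.g. an atomic increment:

       1:  ldq_l   t0, 0(a0)
           addq    t0, 1, t0
           stq_c   t0, 0(a0)
           beq     t0, 1b

   On failure STx_C writes 0 to ra and the guest branches back, so the
   non-atomic sequence above only has to report success or failure in
   ra and invalidate the lock.  */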

static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}
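
/* Direct TB chaining via goto_tb is only valid while the translation of
   the destination stays valid along with this TB; restricting dest to
   the TB's own guest page means page-level invalidation catches both,
   while cross-page branches take the slower exit-and-lookup path.  */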

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return NO_EXIT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
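
/* Worked example for the >= / < case: if src is 0x8000000000000000
   (-0.0), the setcond produces 0, the negation leaves 0, and the AND
   forces dest to +0.0.  Any other src yields 1, negated to an all-ones
   mask, so the AND leaves src unchanged.  */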

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        /* Very uncommon case - Do not bother to optimize.  */
        c1 = tcg_const_i64(0);
    } else if (mask) {
        c1 = tcg_const_i64(1);
        tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
    } else {
        c1 = cpu_ir[ra];
    }
    if (islit) {
        v1 = tcg_const_i64(lit);
    } else {
        v1 = cpu_ir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);

    tcg_temp_free_i64(z);
    if (ra == 31 || mask) {
        tcg_temp_free_i64(c1);
    }
    if (islit) {
        tcg_temp_free_i64(v1);
    }
}

static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    c1 = tcg_temp_new_i64();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(c1, 0);
    } else {
        gen_fold_mzero(cond, c1, cpu_fir[ra]);
    }
    if (rb == 31) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = cpu_fir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);

    tcg_temp_free_i64(z);
    tcg_temp_free_i64(c1);
    if (rb == 31) {
        tcg_temp_free_i64(v1);
    }
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val;
    if (reg == 31) {
        val = tcg_const_i64(0);
    } else {
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
            } else {
                gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
            }
        }
        val = tcg_temp_new();
        tcg_gen_mov_i64(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
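
/* E.g. plain ADDS (no /I qualifier) masks float_flag_inexact out of the
   accumulated flags before raising, whereas ADDS/SUI keeps it and so
   can trap on an inexact result.  */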

static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
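
/* The bit shuffling above undoes the longword-in-FP-register layout:
   result<31:30> comes from fir[rb]<63:62> and result<29:0> from
   fir[rb]<58:29>, with the final value sign-extended to 64 bits.
   gen_fcvtql below performs the inverse mapping.  */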

static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}

#define FARITH2(name)                                                   \
    static inline void glue(gen_f, name)(int rb, int rc)                \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (rb != 31) {                                                 \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]);     \
        } else {                                                        \
            TCGv tmp = tcg_const_i64(0);                                \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp);             \
            tcg_temp_free(tmp);                                         \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], cpu_env, vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}
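
/* The mask selects which bits come from Fa: the sign bit alone for
   CPYS/CPYSN (0x8000000000000000), or the sign plus the 11-bit T-float
   exponent (the top twelve bits, 0xFFF0000000000000) for CPYSE; all
   remaining bits are taken from Fb.  See the wrappers below.  */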

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}

#define FARITH3(name)                                                   \
    static inline void glue(gen_f, name)(int ra, int rb, int rc)        \
    {                                                                   \
        TCGv va, vb;                                                    \
                                                                        \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra == 31) {                                                 \
            va = tcg_const_i64(0);                                      \
        } else {                                                        \
            va = cpu_fir[ra];                                           \
        }                                                               \
        if (rb == 31) {                                                 \
            vb = tcg_const_i64(0);                                      \
        } else {                                                        \
            vb = cpu_fir[rb];                                           \
        }                                                               \
                                                                        \
        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);              \
                                                                        \
        if (ra == 31) {                                                 \
            tcg_temp_free(va);                                          \
        }                                                               \
        if (rb == 31) {                                                 \
            tcg_temp_free(vb);                                          \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
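
/* Each bit of LIT selects one byte lane of the result, so e.g.
   zapnot_mask(0x01) expands to 0xff and zapnot_mask(0x0f) to
   0x00000000ffffffff.  */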

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31)) {
        return;
    } else if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    } else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    } else {
        gen_helper_zapnot(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
    }
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31)) {
        return;
    } else if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    } else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    } else {
        gen_helper_zap(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
    }
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31)) {
        return;
    } else if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    } else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
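
/* Example: EXTQH with (lit & 7) == 2 left-shifts Ra by 64 - 16 = 48
   bits; the & 0x3f wrap maps the 64 computed for a zero byte offset
   down to 0, so that case leaves the register unshifted rather than
   invoking an out-of-range shift.  */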

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31)) {
        return;
    } else if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    } else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31)) {
        return;
    } else if (unlikely(ra == 31) || (islit && (lit & 7) == 0)) {
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64(cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
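
/* Checking the split shift: for (B & 7) == 3 the desired right shift is
   64 - 24 = 40; the negated count is ~24 & 63 = 39 and the extra
   constant shift of 1 brings the total to 40.  For (B & 7) == 0 the two
   parts sum to 63 + 1 = 64, which correctly produces zero.  */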

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31)) {
        return;
    } else if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31)) {
        return;
    } else if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    } else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31)) {
        return;
    } else if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    } else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31)) {                                         \
        return;                                                       \
    }                                                                 \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else {                                                      \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);  \
        }                                                             \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name(cpu_ir[rc], tmp1, tmp2);              \
            tcg_temp_free(tmp2);                                      \
        } else {                                                      \
            gen_helper_ ## name(cpu_ir[rc], tmp1, cpu_ir[rb]);        \
        }                                                             \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(umulh)
ARITH3(cmpbge)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)

/* Code to call arith3 helpers that also take the env argument.  */
#define ARITH3_EX(name)                                                 \
    static inline void glue(gen_, name)(int ra, int rb, int rc,         \
                                        int islit, uint8_t lit)         \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], tmp);                   \
                tcg_temp_free(tmp);                                     \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], cpu_ir[rb]);            \
            }                                                           \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);   \
                tcg_temp_free(tmp2);                                    \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
            }                                                           \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
ARITH3_EX(addlv)
ARITH3_EX(sublv)
ARITH3_EX(addqv)
ARITH3_EX(subqv)
ARITH3_EX(mullv)
ARITH3_EX(mulqv)

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31)) {                           \
        return;                                         \
    }                                                   \
    if (unlikely(rb == 31)) {                           \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    } else {                                            \
        gen_helper_ ## name(cpu_ir[rc], cpu_ir[rb]);    \
    }                                                   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)

static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}

static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure to store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                              offsetof(CPUAlphaState, cpu_index));
            break;

        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    return gen_invalid(ctx);
}
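
/* The OSF/1 PALcode number space splits into privileged calls
   (0x00-0x3F, e.g. SWPIPL, WHAMI) and unprivileged calls (0x80-0xBF,
   e.g. IMB, RDUNIQUE).  Anything else, or a privileged call made from
   user mode, falls through to the OPCDEC exception via gen_invalid.  */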

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000
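
/* cpu_pr_data maps a processor register number to the offset of its
   backing field in CPUAlphaState.  PR_BYTE and PR_LONG are or'd into
   the offset when the field is narrower than 64 bits; a return of 0
   means the register is read-as-zero, write-ignore.  */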

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return NO_EXIT;
    }
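
    /* PR 250 (WALL_TIME) is read via a helper.  When icount is enabled,
       the read counts as I/O and the TB must end after the access.  */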
    if (regno == 250) {
        /* WALL_TIME */
        if (use_icount) {
            gen_io_start();
            gen_helper_get_time(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            gen_helper_get_time(cpu_ir[ra]);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}

static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
{
    TCGv tmp;
    int data;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }
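
    /* Registers 251-255 have side effects beyond a plain store: TLB
       invalidation, WAIT, HALT, and the alarm timer are handled by
       helpers below.  */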
    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, tmp);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUAlphaState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(tmp);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, tmp);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(tmp, cpu_env, data);
            }
        }
        break;
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY */

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    int32_t disp12;
#endif
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
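    /* A register operand of $31 always reads as zero, so fold rb == 31
       into a zero literal; real_islit records whether the literal bit
       was actually set, for insns that reject the literal form.  */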
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, palcode);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
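        /* The scaled forms below shift ra left by 2 (S4) or 3 (S8)
           before the add or subtract; the longword (L) forms then
           sign-extend the 32-bit result, as ADDL does above.  */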
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
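        /* AMASK returns its operand with the bits of the CPU's feature
           mask cleared, so features recorded in the TB flags read back
           as implemented (zero).  */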
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;

                if (islit) {
                    tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
                } else {
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtf(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x0B:
            /* SQRTS */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrts(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        case 0x14:
            /* ITOFF */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtg(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x02B:
            /* SQRTT */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtt(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x1E:
            /* CVTDG */
#if 0 // TODO
            gen_fcvtdg(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2D:
            /* CVTGD */
#if 0 // TODO
            gen_fcvtgd(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
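        /* Function slot 0x2C is shared: fn11 values 0x2AC and 0x6AC
           select CVTST; every other qualifier combination is CVTTS.  */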
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(cpu_env, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
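        /* The miscellaneous opcode encodes its function in disp16; the
           barrier and prefetch hints are no-ops under QEMU.  */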
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
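        /* RPCC reads the cycle counter; under icount the read counts
           as I/O, so the TB must end after the access.  */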
        case 0xC000:
            /* RPCC */
            if (ra != 31) {
                if (use_icount) {
                    gen_io_start();
                    gen_helper_load_pcc(cpu_ir[ra], cpu_env);
                    gen_io_end();
                    ret = EXIT_PC_STALE;
                } else {
                    gen_helper_load_pcc(cpu_ir[ra], cpu_env);
                }
            }
            break;
        case 0xE000:
            /* RC */
            gen_rx(ra, 0);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            gen_rx(ra, 1);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mfpr(ra, insn & 0xffff);
        }
#endif
        goto invalid_opc;
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        if (rb != 31) {
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        } else {
            tcg_gen_movi_i64(cpu_pc, 0);
        }
        if (ra != 31) {
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr;

            if (ra == 31) {
                break;
            }

            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
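            /* Bits 12-15 of the insn select the HW_LD variant:
               physical vs virtual, locked, and alternate access mode.  */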
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                    } else {
                        tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x30:
            /* CTPOP */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                    } else {
                        gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x31:
            /* PERR */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_perr(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x32:
            /* CTLZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                    } else {
                        gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x33:
            /* CTTZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                    } else {
                        gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x34:
            /* UNPKBW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbw(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x35:
            /* UNPKBL */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbl(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x36:
            /* PKWB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pkwb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x37:
            /* PKLB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pklb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x38:
            /* MINSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x39:
            /* MINSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3A:
            /* MINUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3B:
            /* MINUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3C:
            /* MAXUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3D:
            /* MAXUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3E:
            /* MAXSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3F:
            /* MAXSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
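        /* FTOIT and FTOIS move raw bits between the FP and integer
           register files; both require the FIX extension.  */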
        case 0x70:
            /* FTOIT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mtpr(ctx, rb, insn & 0xffff);
        }
#endif
        goto invalid_opc;
    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            if (rb == 31) {
                /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
                   address from EXC_ADDR.  This turns out to be useful for our
                   emulation PALcode, so continue to accept it.  */
                TCGv tmp = tcg_temp_new();
                tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
                gen_helper_hw_ret(cpu_env, tmp);
                tcg_temp_free(tmp);
            } else {
                gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
            }
            ret = EXIT_PC_UPDATED;
            break;
        }
#endif
        goto invalid_opc;
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr, val;
            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            if (ra != 31)
                val = cpu_ir[ra];
            else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }
3170
            switch ((insn >> 12) & 0xF) {
3171
            case 0x0:
3172
                /* Longword physical access */
3173
                gen_helper_stl_phys(addr, val);
3174
                break;
3175
            case 0x1:
3176
                /* Quadword physical access */
3177
                gen_helper_stq_phys(addr, val);
3178
                break;
3179
            case 0x2:
3180
                /* Longword physical access with lock */
3181
                gen_helper_stl_c_phys(val, cpu_env, addr, val);
3182
                break;
3183
            case 0x3:
3184
                /* Quadword physical access with lock */
3185
                gen_helper_stq_c_phys(val, cpu_env, addr, val);
3186
                break;
3187
            case 0x4:
3188
                /* Longword virtual access */
3189
                goto invalid_opc;
3190
            case 0x5:
3191
                /* Quadword virtual access */
3192
                goto invalid_opc;
3193
            case 0x6:
3194
                /* Invalid */
3195
                goto invalid_opc;
3196
            case 0x7:
3197
                /* Invalid */
3198
                goto invalid_opc;
3199
            case 0x8:
3200
                /* Invalid */
3201
                goto invalid_opc;
3202
            case 0x9:
3203
                /* Invalid */
3204
                goto invalid_opc;
3205
            case 0xA:
3206
                /* Invalid */
3207
                goto invalid_opc;
3208
            case 0xB:
3209
                /* Invalid */
3210
                goto invalid_opc;
3211
            case 0xC:
3212
                /* Longword virtual access with alternate access mode */
3213
                goto invalid_opc;
3214
            case 0xD:
3215
                /* Quadword virtual access with alternate access mode */
3216
                goto invalid_opc;
3217
            case 0xE:
3218
                /* Invalid */
3219
                goto invalid_opc;
3220
            case 0xF:
3221
                /* Invalid */
3222
                goto invalid_opc;
3223
            }
3224
            if (ra == 31)
3225
                tcg_temp_free(val);
3226
            tcg_temp_free(addr);
3227
            break;
3228
        }
3229
#endif
3230
        goto invalid_opc;
3231
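    /* Opcodes 0x20..0x27 are the floating-point loads and stores: LDF/STF
       and LDG/STG handle the VAX F_ and G_float memory formats, LDS/STS
       the IEEE single (S_floating, converted to and from the 64-bit
       register format), and LDT/STT the IEEE double, which needs no
       conversion.  */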
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
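    /* Opcodes 0x28..0x2F are the integer loads and stores.  The
       load-locked variants (gen_qemu_ldl_l/ldq_l) record the locked
       address and value, which gen_store_conditional then checks when
       STL_C/STQ_C executes.  */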
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        break;
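    /* Opcodes 0x30..0x3F are the branch formats.  disp21 is a signed
       longword displacement, so the target is the PC of the next insn
       plus disp21 * 4.  BLBC/BLBS test only the low bit of Ra, hence the
       nonzero mask argument to gen_bcond.  */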
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}

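/* Translate a block of guest insns starting at tb->pc into TCG ops.  When
   search_pc is set, the loop also fills tcg_ctx.gen_opc_pc[] and friends
   so that a position in the generated op stream can be mapped back to a
   guest PC (see restore_state_to_opc at the end of this file).  */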
static inline void gen_intermediate_code_internal(CPUAlphaState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.env = env;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternatively we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = ctx.pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = cpu_ldl_code(env, ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

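    /* Emit the TB epilogue according to the ExitStatus returned by the
       last translated insn.  */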
    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (env->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}

void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

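/* Known CPU models.  implver selects the chip generation reported to the
   guest; amask advertises the implemented instruction-set extensions
   (BWX byte/word memory ops, FIX, CIX, MVI, etc.).  */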
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4",   IMPLVER_2106x, 0 },
    { "ev5",   IMPLVER_21164, 0 },
    { "ev56",  IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064", IMPLVER_2106x, 0 },
    { "21164", IMPLVER_21164, 0 },
    { "21164a", IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};

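/* Create and initialize a CPU, e.g. cpu_alpha_init("ev67") from board
   setup code.  Unknown model names quietly fall back to the ev67
   feature set below.  */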
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    AlphaCPU *cpu;
    CPUAlphaState *env;
    int implver, amask, i, max;

    cpu = ALPHA_CPU(object_new(TYPE_ALPHA_CPU));
    env = &cpu->env;

    alpha_translate_init();

    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;
    env->cpu_model_str = cpu_model;

    qemu_init_vcpu(env);
    return env;
}

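/* Map a position in the generated op stream back to the guest PC, using
   the bookkeeping arrays filled in by the search_pc translation pass;
   used when unwinding after an exception.  */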
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}