target-alpha / translate.c @ 2958620f
/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
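
/* The size above is exact for the NUL-terminated names built in the
   init loop below: "ir0".."ir9" take 10*4 bytes, "ir10".."ir30" take
   21*5, and the "fir" names are each one byte longer (10*5 + 21*6).  */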

#include "gen-icount.h"

static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
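
/* Note: the two load-locked helpers above record the address and the
   loaded value so that the non-atomic compare-and-store sequence in
   gen_store_conditional() below can check them.  */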

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
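
/* Note: resetting cpu_lock_addr to -1 above presumably drops the
   reservation: -1 can never match an address recorded by LDL_L/LDQ_L,
   so a repeated store-conditional without a new load-locked fails.  */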

static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
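
/* At this point ctx->pc has already been advanced past the branch
   instruction (it is the value written to RA above), so dest computes
   the architected Alpha branch target: PC_of_next_insn + 4*disp.  */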

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movi pc, next
             addi tmp, pc, disp
             movcond pc, cond, 0, tmp, pc
           or
             setcond tmp, cond, 0
             movi pc, next
             neg tmp, tmp
             andi tmp, tmp, disp
             add pc, pc, tmp
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
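
/* Worked example for the >= / < case: if src is 0x8000000000000000
   (-0.0), the setcond yields 0, the neg leaves 0, and the and clears
   src to +0.0; for any other src the neg yields all-ones and src
   passes through unchanged.  */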

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}

static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_tmp;
    int l1;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
    } else {
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
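
/* These bits live in the upper part of the 11-bit function field
   (fn11) of the floating-point operate instruction format; the low
   bits select the operation itself.  */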

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_env, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_env, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_env, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
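
/* Sketch of the register layout being undone here: a longword held in
   an FP register keeps its two high bits <31:30> in register bits
   <63:62> and its low 30 bits in <58:29>, which is why CVTLQ (above)
   and CVTQL (below) shuffle the two fields separately.  */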

static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}

#define FARITH2(name)                                                   \
    static inline void glue(gen_f, name)(int rb, int rc)                \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (rb != 31) {                                                 \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]);     \
        } else {                                                        \
            TCGv tmp = tcg_const_i64(0);                                \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp);             \
            tcg_temp_free(tmp);                                         \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use chopped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], cpu_env, vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
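
/* The masks follow the T-float layout: bit 63 alone is the sign, so
   CPYS/CPYSN copy (or copy with inverted sign) just that bit, while
   0xFFF0000000000000 adds the 11 exponent bits for CPYSE.  */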

#define FARITH3(name)                                                   \
    static inline void glue(gen_f, name)(int ra, int rb, int rc)        \
    {                                                                   \
        TCGv va, vb;                                                    \
                                                                        \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra == 31) {                                                 \
            va = tcg_const_i64(0);                                      \
        } else {                                                        \
            va = cpu_fir[ra];                                           \
        }                                                               \
        if (rb == 31) {                                                 \
            vb = tcg_const_i64(0);                                      \
        } else {                                                        \
            vb = cpu_fir[rb];                                           \
        }                                                               \
                                                                        \
        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);              \
                                                                        \
        if (ra == 31) {                                                 \
            tcg_temp_free(va);                                          \
        }                                                               \
        if (rb == 31) {                                                 \
            tcg_temp_free(vb);                                          \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
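
/* Example: zapnot_mask(0x0f) == 0x00000000ffffffff, i.e. bit i of the
   literal selects byte i of the 64-bit value.  */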

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
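
/* Worked example of the split shift: for B&7 == 3 the two steps shift
   right by (~24 & 63) == 39 and then by 1, i.e. 40 == 64 - 24 in total;
   for B&7 == 0 they shift by 63 and then 1, yielding the required zero
   without ever asking TCG for an out-of-range 64-bit shift.  */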

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(umulh)
ARITH3(cmpbge)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)

/* Code to call arith3 helpers that also take the cpu env, so that
   they may raise arithmetic exceptions.  */
#define ARITH3_EX(name)                                                 \
    static inline void glue(gen_, name)(int ra, int rb, int rc,         \
                                        int islit, uint8_t lit)         \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], tmp);                   \
                tcg_temp_free(tmp);                                     \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], cpu_ir[rb]);            \
            }                                                           \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);   \
                tcg_temp_free(tmp2);                                    \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
            }                                                           \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
ARITH3_EX(addlv)
ARITH3_EX(sublv)
ARITH3_EX(addqv)
ARITH3_EX(subqv)
ARITH3_EX(mullv)
ARITH3_EX(mulqv)

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)

static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}

static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure to store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                              offsetof(CPUAlphaState, cpu_index));
            break;

        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    return gen_invalid(ctx);
}

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

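/* Map an OSF/1 processor-register number onto its offset within
   CPUAlphaState.  PR_BYTE and PR_LONG mark sub-quadword fields; a
   result of 0 means the register is unassigned.  */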
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return NO_EXIT;
    }

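    /* Reading the wall clock counts as an I/O operation under icount,
       so the TB must end afterward for pending interrupts to be taken.  */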
    if (regno == 250) {
        /* WALL_TIME */
        if (use_icount) {
            gen_io_start();
            gen_helper_get_time(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            gen_helper_get_time(cpu_ir[ra]);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}

static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
{
    TCGv tmp;
    int data;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia();
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(tmp);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUAlphaState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(tmp);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(tmp);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(tmp, cpu_env, data);
            }
        }
        break;
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY */

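/* Translate a single instruction, returning a status that tells the
   caller how (or whether) the TB has ended.  */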
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    int32_t disp12;
#endif
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
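    /* Register $31 always reads as zero, so fold rb == $31 into a
       zero literal.  */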
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
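    /* The 21-bit and 12-bit displacements are sign-extended with
       shift pairs.  */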
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, palcode);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
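    /* Opcode 0x10: integer arithmetic.  */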
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
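    /* Opcode 0x11: integer logical and conditional-move operations.  */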
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;

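                /* Each implemented extension clears its bit in the
                   result, so software sees which features are absent.  */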
                if (islit) {
                    tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
                } else {
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
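    /* Opcode 0x12: shift and byte-manipulation operations.  */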
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
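    /* Opcode 0x13: integer multiply.  */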
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtf(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x0B:
            /* SQRTS */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrts(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        case 0x14:
            /* ITOFF */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtg(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x2B:
            /* SQRTT */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtt(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x1E:
            /* CVTDG */
#if 0 /* TODO */
            gen_fcvtdg(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2D:
            /* CVTGD */
#if 0 /* TODO */
            gen_fcvtgd(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
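        /* CVTST shares fpfn 0x2C with CVTTS; only the two full fn11
           encodings checked below select CVTST.  */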
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(cpu_env, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
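    /* Opcode 0x18: memory barriers and miscellany, selected by disp16.  */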
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
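        /* With icount, reading the cycle counter is an I/O operation,
           so the TB must end for pending interrupts to be taken.  */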
        case 0xC000:
            /* RPCC */
            if (ra != 31) {
                if (use_icount) {
                    gen_io_start();
                    gen_helper_load_pcc(cpu_ir[ra]);
                    gen_io_end();
                    ret = EXIT_PC_STALE;
                } else {
                    gen_helper_load_pcc(cpu_ir[ra]);
                }
            }
            break;
        case 0xE000:
            /* RC */
            gen_rx(ra, 0);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            gen_rx(ra, 1);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mfpr(ra, insn & 0xffff);
        }
#endif
        goto invalid_opc;
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        if (rb != 31) {
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        } else {
            tcg_gen_movi_i64(cpu_pc, 0);
        }
        if (ra != 31) {
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr;

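            /* With $31 as the destination there is no visible result,
               so there is nothing to do.  */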
            if (ra == 31) {
                break;
            }

            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                    } else {
                        tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x30:
            /* CTPOP */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                    } else {
                        gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x31:
            /* PERR */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_perr(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x32:
            /* CTLZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                    } else {
                        gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x33:
            /* CTTZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                    } else {
                        gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
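        /* The PK/UNPK instructions encode ra as $31 and have no literal
           form; any other encoding is invalid.  */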
        case 0x34:
            /* UNPKBW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbw(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x35:
            /* UNPKBL */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbl(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x36:
            /* PKWB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pkwb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x37:
            /* PKLB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pklb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x38:
            /* MINSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x39:
            /* MINSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3A:
            /* MINUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3B:
            /* MINUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3C:
            /* MAXUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3D:
            /* MAXUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3E:
            /* MAXSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3F:
            /* MAXSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x70:
            /* FTOIT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mtpr(ctx, rb, insn & 0xffff);
        }
#endif
        goto invalid_opc;
    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            if (rb == 31) {
                /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
                   address from EXC_ADDR.  This turns out to be useful for our
                   emulation PALcode, so continue to accept it.  */
                TCGv tmp = tcg_temp_new();
                tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
                gen_helper_hw_ret(tmp);
                tcg_temp_free(tmp);
            } else {
                gen_helper_hw_ret(cpu_ir[rb]);
            }
            ret = EXIT_PC_UPDATED;
            break;
        }
#endif
        goto invalid_opc;
    case 0x1F:
3157
        /* HW_ST (PALcode) */
3158
#ifndef CONFIG_USER_ONLY
3159
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3160
            TCGv addr, val;
3161
            addr = tcg_temp_new();
3162
            if (rb != 31)
3163
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3164
            else
3165
                tcg_gen_movi_i64(addr, disp12);
3166
            if (ra != 31)
3167
                val = cpu_ir[ra];
3168
            else {
3169
                val = tcg_temp_new();
3170
                tcg_gen_movi_i64(val, 0);
3171
            }
3172
            switch ((insn >> 12) & 0xF) {
3173
            case 0x0:
3174
                /* Longword physical access */
3175
                gen_helper_stl_phys(addr, val);
3176
                break;
3177
            case 0x1:
3178
                /* Quadword physical access */
3179
                gen_helper_stq_phys(addr, val);
3180
                break;
3181
            case 0x2:
3182
                /* Longword physical access with lock */
3183
                gen_helper_stl_c_phys(val, addr, val);
3184
                break;
3185
            case 0x3:
3186
                /* Quadword physical access with lock */
3187
                gen_helper_stq_c_phys(val, addr, val);
3188
                break;
3189
            case 0x4:
3190
                /* Longword virtual access */
3191
                goto invalid_opc;
3192
            case 0x5:
3193
                /* Quadword virtual access */
3194
                goto invalid_opc;
3195
            case 0x6:
3196
                /* Invalid */
3197
                goto invalid_opc;
3198
            case 0x7:
3199
                /* Invalid */
3200
                goto invalid_opc;
3201
            case 0x8:
3202
                /* Invalid */
3203
                goto invalid_opc;
3204
            case 0x9:
3205
                /* Invalid */
3206
                goto invalid_opc;
3207
            case 0xA:
3208
                /* Invalid */
3209
                goto invalid_opc;
3210
            case 0xB:
3211
                /* Invalid */
3212
                goto invalid_opc;
3213
            case 0xC:
3214
                /* Longword virtual access with alternate access mode */
3215
                goto invalid_opc;
3216
            case 0xD:
3217
                /* Quadword virtual access with alternate access mode */
3218
                goto invalid_opc;
3219
            case 0xE:
3220
                /* Invalid */
3221
                goto invalid_opc;
3222
            case 0xF:
3223
                /* Invalid */
3224
                goto invalid_opc;
3225
            }
3226
            if (ra == 31)
3227
                tcg_temp_free(val);
3228
            tcg_temp_free(addr);
3229
            break;
3230
        }
3231
#endif
3232
        goto invalid_opc;
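    /* Memory-format opcodes 0x20-0x27 are the floating-point loads and
       stores (VAX F/G and IEEE S/T formats).  The nonzero flag following
       disp16 selects the FP register file in gen_load_mem/gen_store_mem.  */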
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
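    /* 0x28-0x2F are the integer longword/quadword loads and stores,
       including the load-locked/store-conditional pairs.  */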
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        break;
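    /* 0x30-0x3F are the branch-format opcodes: BR and BSR branch
       unconditionally (writing the return address to ra), 0x31-0x37
       test an FP register, and 0x38-0x3F test an integer register;
       the final 1 for BLBC/BLBS masks the test down to bit 0.  */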
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}

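/* Translate a block of guest code starting at tb->pc.  When search_pc is
   nonzero, additionally record the guest PC and icount of each translated
   insn so that restore_state_to_opc can recover them after a fault.  */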
static inline void gen_intermediate_code_internal(CPUAlphaState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.env = env;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternatively we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
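    /* Main translation loop: one iteration per guest insn, until some
       condition forces the TB to end.  */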
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                || gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
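
    /* Emit the TB epilogue.  EXIT_GOTO_TB and EXIT_NORETURN already emitted
       everything needed; a stale PC must be stored back before leaving, and
       single-stepping replaces the normal exit with a debug exception.  */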
    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (env->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}

void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

struct cpu_def_t {
    const char *name;
    int implver, amask;
};
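
/* Known CPU models, mapping each name to its IMPLVER and AMASK bits.
   Unknown model names fall back to the ev67-equivalent defaults set
   in cpu_alpha_init below.  */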
static const struct cpu_def_t cpu_defs[] = {
    { "ev4",   IMPLVER_2106x, 0 },
    { "ev5",   IMPLVER_21164, 0 },
    { "ev56",  IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064", IMPLVER_2106x, 0 },
    { "21164", IMPLVER_21164, 0 },
    { "21164a", IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};

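/* Allocate and initialize a CPUAlphaState for the given model name.  */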
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    int implver, amask, i, max;

    env = g_malloc0(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

#if defined (CONFIG_USER_ONLY)
    env->ps = PS_USER_MODE;
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD
                               | FPCR_DYN_NORMAL));
#endif
    env->lock_addr = -1;
    env->fen = 1;

    qemu_init_vcpu(env);
    return env;
}
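
/* Restore the guest PC recorded for opc position pc_pos by the
   search_pc pass of gen_intermediate_code_internal.  */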
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}