target-alpha/translate.c @ 2b85cf0e

/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
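/* Sizing note: "ir0".."ir9" need 4 bytes each including the NUL and
   "ir10".."ir30" need 5, hence 10*4 + 21*5; the "fir" names are one
   character longer, hence 10*5 + 21*6.  */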

#include "gen-icount.h"

static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
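
/* The two load-locked helpers above record the address and the loaded
   value; in system mode gen_store_conditional below emulates the LL/SC
   pair by re-checking both, rather than by tracking physical lock state. */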

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  The various other loads with ra $31
       are forms of prefetches, which we can treat as nops.  No worries
       about missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}

static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}
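
/* Direct TB chaining with goto_tb skips the TB lookup, so the target
   must lie on the same guest page as the TB itself: invalidating the
   translations of that one page is then enough to break the chain. */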

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movi pc, next
             addi tmp, pc, disp
             movcond pc, cond, 0, tmp, pc
           or
             setcond tmp, cond, 0
             movi pc, next
             neg tmp, tmp
             andi tmp, tmp, disp
             add pc, pc, tmp
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}

    
454
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
455
                            int32_t disp, int mask)
456
{
457
    TCGv cmp_tmp;
458

    
459
    if (unlikely(ra == 31)) {
460
        cmp_tmp = tcg_const_i64(0);
461
    } else {
462
        cmp_tmp = tcg_temp_new();
463
        if (mask) {
464
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
465
        } else {
466
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
467
        }
468
    }
469

    
470
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
471
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
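
/* Worked example for the >= / < case: src == 0x8000000000000000 (-0.0)
   makes the setcond produce 0, so dest becomes 0 (+0.0); any other src
   makes it produce 1, which negates to all-ones and leaves src intact. */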

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}

static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_tmp;
    int l1;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
    } else {
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
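
/* These bits decode the fn11 field of FP operate instructions; e.g. an
   ADDS/SU encoding carries fn11 = 0x580 = QUAL_S | QUAL_U | QUAL_RM_N,
   i.e. software completion, underflow enabled, round to nearest even. */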

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val;
    if (reg == 31) {
        val = tcg_const_i64(0);
    } else {
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
            } else {
                gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
            }
        }
        val = tcg_temp_new();
        tcg_gen_mov_i64(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
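
/* Both conversions implement the Alpha longword-in-FP-register format:
   integer bits <31:30> live at register bits <63:62> and bits <29:0> at
   <58:29>, which is exactly the mapping the shift/mask pairs construct. */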

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}

#define FARITH2(name)                                                   \
    static inline void glue(gen_f, name)(int rb, int rc)                \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (rb != 31) {                                                 \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]);     \
        } else {                                                        \
            TCGv tmp = tcg_const_i64(0);                                \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp);             \
            tcg_temp_free(tmp);                                         \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], cpu_env, vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
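
/* CPYS with mask 0x8000000000000000 builds (ra & sign) | (rb & ~sign),
   i.e. the sign of FA with the exponent and fraction of FB; CPYSE's
   0xFFF0000000000000 mask copies the sign and exponent fields instead. */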

#define FARITH3(name)                                                   \
    static inline void glue(gen_f, name)(int ra, int rb, int rc)        \
    {                                                                   \
        TCGv va, vb;                                                    \
                                                                        \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra == 31) {                                                 \
            va = tcg_const_i64(0);                                      \
        } else {                                                        \
            va = cpu_fir[ra];                                           \
        }                                                               \
        if (rb == 31) {                                                 \
            vb = tcg_const_i64(0);                                      \
        } else {                                                        \
            vb = cpu_fir[rb];                                           \
        }                                                               \
                                                                        \
        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);              \
                                                                        \
        if (ra == 31) {                                                 \
            tcg_temp_free(va);                                          \
        }                                                               \
        if (rb == 31) {                                                 \
            tcg_temp_free(vb);                                          \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
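
/* Each bit of LIT selects one byte lane; e.g. zapnot_mask(0x0f) is
   0x00000000ffffffff and zapnot_mask(0x01) is 0xff, which is why the
   0x01/0x03/0x0f cases below reduce to ordinary zero extensions. */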

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}


/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
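
/* Worked example of the split shift used above: for B & 7 == 2 the byte
   shift is 16, ~16 & 63 == 47, and the variable shift plus the constant
   1-shift give 48 == 64 - 16; for B & 7 == 0 they give 63 + 1 == 64,
   producing zero without relying on an (undefined) 64-bit shift by 64. */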

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(umulh)
ARITH3(cmpbge)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)

/* Code to call arith3 helpers that may raise arithmetic traps, and so
   take the cpu_env pointer as well.  */
#define ARITH3_EX(name)                                                 \
    static inline void glue(gen_, name)(int ra, int rb, int rc,         \
                                        int islit, uint8_t lit)         \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], tmp);                   \
                tcg_temp_free(tmp);                                     \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], cpu_ir[rb]);            \
            }                                                           \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);   \
                tcg_temp_free(tmp2);                                    \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
            }                                                           \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
ARITH3_EX(addlv)
ARITH3_EX(sublv)
ARITH3_EX(addqv)
ARITH3_EX(subqv)
ARITH3_EX(mullv)
ARITH3_EX(mulqv)

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)

static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}
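
/* RC and RS: the old interrupt flag is read into RA (unless RA is $31)
   and the flag is then written with SET.  */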

static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial accesses
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure to store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                              offsetof(CPUAlphaState, cpu_index));
            break;

        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    return gen_invalid(ctx);
}
1600

    
1601
#ifndef CONFIG_USER_ONLY
1602

    
1603
#define PR_BYTE         0x100000
1604
#define PR_LONG         0x200000
1605

    
1606
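/* Map a processor-register number onto its offset within CPUAlphaState.
   PR_BYTE or PR_LONG is ORed into the offset to encode an 8-bit or
   32-bit access; a return of 0 marks the register as unassigned
   (read-zero, write-ignore).  */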
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

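/* HW_MFPR: read processor register REGNO into Ra.  PR 250 (WALL_TIME)
   is the one read that performs I/O: under icount it is bracketed with
   gen_io_start/gen_io_end and the TB is ended (EXIT_PC_STALE) so the
   instruction count stays exact.  */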
static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return NO_EXIT;
    }

    if (regno == 250) {
        /* WALL_TIME */
        if (use_icount) {
            gen_io_start();
            gen_helper_get_time(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            gen_helper_get_time(cpu_ir[ra]);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}

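/* HW_MTPR: write Rb to processor register REGNO.  Registers 251-255
   have side effects (TLB invalidation, WAIT, HALT, ALARM); plain data
   registers go through cpu_pr_data just as for reads.  */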
static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
{
    TCGv tmp;
    int data;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, tmp);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUAlphaState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(tmp);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, tmp);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(tmp, cpu_env, data);
            }
        }
        break;
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY*/

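/* Decode and translate one instruction, returning how (or whether)
   the TB ends.  All fields of the 32-bit instruction word are
   extracted up front; dispatch is on the 6-bit major opcode.  */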
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    int32_t disp12;
#endif
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
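    /* Operate-format bit 12 selects an 8-bit literal in bits <20:13>
       in place of Rb.  Since R31 always reads as zero, an Rb of 31 is
       canonicalized to the literal 0 so later code has one less case
       to consider.  */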
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, palcode);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
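    /* Opcode 0x10: integer arithmetic.  R31 reads as zero and writes
       to it are discarded, which is why nearly every case below tests
       rc != 31 before emitting anything and special-cases ra == 31.  */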
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
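        /* The scaled forms (S4ADDx, S8ADDx, S4SUBx, S8SUBx) shift Ra
           left by 2 or 3 before the add or subtract; the longword
           variants then sign-extend the low 32 bits of the result.  */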
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
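    /* Opcode 0x11: logical operations and conditional moves.  AMASK
       reports the implemented architecture extensions by clearing the
       feature bits the CPU supports, taken here from the TB flags.  */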
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;

                if (islit) {
                    tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
                } else {
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
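    /* Opcode 0x12: shifts and the byte-manipulation group (MSK, EXT,
       INS, ZAP).  Alpha shift counts use only the low 6 bits of Rb,
       hence the explicit & 0x3f on the variable-shift paths.  */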
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
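    /* Opcode 0x14: square root and integer-to-FP register moves.
       All of these require the FIX extension, so each case checks
       TB_FLAGS_AMASK_FIX before emitting code.  */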
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtf(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x0B:
            /* SQRTS */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrts(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        case 0x14:
            /* ITOFF */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtg(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x02B:
            /* SQRTT */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtt(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x1E:
            /* CVTDG */
#if 0 // TODO
            gen_fcvtdg(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2D:
            /* CVTGD */
#if 0 // TODO
            gen_fcvtgd(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(cpu_env, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
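    /* Opcode 0x18: miscellaneous instructions, dispatched on the whole
       16-bit function field.  The barriers and cache hints are no-ops
       under QEMU's execution model; RPCC, RC and RS have real work to
       do, with RPCC getting the same icount treatment as WALL_TIME.  */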
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            if (ra != 31) {
                if (use_icount) {
                    gen_io_start();
                    gen_helper_load_pcc(cpu_ir[ra], cpu_env);
                    gen_io_end();
                    ret = EXIT_PC_STALE;
                } else {
                    gen_helper_load_pcc(cpu_ir[ra], cpu_env);
                }
            }
            break;
        case 0xE000:
            /* RC */
            gen_rx(ra, 0);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            gen_rx(ra, 1);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mfpr(ra, insn & 0xffff);
        }
#endif
        goto invalid_opc;
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        if (rb != 31) {
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        } else {
            tcg_gen_movi_i64(cpu_pc, 0);
        }
        if (ra != 31) {
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
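    /* HW_LD and HW_ST are PAL-mode-only memory operations.  The four
       bits in insn<15:12> select the access width, physical versus
       virtual addressing, locking, and the alternate access mode.  */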
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr;

            if (ra == 31) {
                break;
            }

            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a)*/
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                    } else {
                        tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x30:
            /* CTPOP */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                    } else {
                        gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x31:
            /* PERR */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_perr(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x32:
            /* CTLZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                    } else {
                        gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x33:
            /* CTTZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                    } else {
                        gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x34:
            /* UNPKBW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbw(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x35:
            /* UNPKBL */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbl(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x36:
            /* PKWB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pkwb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x37:
            /* PKLB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pklb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x38:
            /* MINSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x39:
            /* MINSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3A:
            /* MINUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3B:
            /* MINUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3C:
            /* MAXUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3D:
            /* MAXUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3E:
            /* MAXSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3F:
            /* MAXSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x70:
            /* FTOIT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
3133
        /* HW_MTPR (PALcode) */
3134
#ifndef CONFIG_USER_ONLY
3135
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3136
            return gen_mtpr(ctx, rb, insn & 0xffff);
3137
        }
3138
#endif
3139
        goto invalid_opc;
3140
    case 0x1E:
3141
        /* HW_RET (PALcode) */
3142
#ifndef CONFIG_USER_ONLY
3143
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3144
            if (rb == 31) {
3145
                /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3146
                   address from EXC_ADDR.  This turns out to be useful for our
3147
                   emulation PALcode, so continue to accept it.  */
3148
                TCGv tmp = tcg_temp_new();
3149
                tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
3150
                gen_helper_hw_ret(cpu_env, tmp);
3151
                tcg_temp_free(tmp);
3152
            } else {
3153
                gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
3154
            }
3155
            ret = EXIT_PC_UPDATED;
3156
            break;
3157
        }
3158
#endif
3159
        goto invalid_opc;
3160
    case 0x1F:
3161
        /* HW_ST (PALcode) */
3162
#ifndef CONFIG_USER_ONLY
3163
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3164
            TCGv addr, val;
3165
            addr = tcg_temp_new();
3166
            if (rb != 31)
3167
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3168
            else
3169
                tcg_gen_movi_i64(addr, disp12);
3170
            if (ra != 31)
3171
                val = cpu_ir[ra];
3172
            else {
3173
                val = tcg_temp_new();
3174
                tcg_gen_movi_i64(val, 0);
3175
            }
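            /* Bits <15:12> of the instruction select the access type.
               Only the physical variants are implemented; the lock-checked
               stores write the success flag back into RA, mirroring the
               STx_C semantics.  */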
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_phys(addr, val);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_phys(addr, val);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_phys(val, cpu_env, addr, val);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_phys(val, cpu_env, addr, val);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Invalid */
                goto invalid_opc;
            case 0x9:
                /* Invalid */
                goto invalid_opc;
            case 0xA:
                /* Invalid */
                goto invalid_opc;
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xE:
                /* Invalid */
                goto invalid_opc;
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }
    
    return ret;
}
    
static inline void gen_intermediate_code_internal(CPUAlphaState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;
    
    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    
    ctx.tb = tb;
    ctx.env = env;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);
    
    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;
    
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
    
    gen_icount_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
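        /* When search_pc is set, record which guest PC each opcode
           buffer position corresponds to, so that restore_state_to_opc
           can recover the PC after a fault.  */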
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = cpu_ldl_code(env, ctx.pc);
        num_insns++;
    
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }
    
        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
    
        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                || gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);
    
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    
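    /* Emit the exit sequence appropriate to how translate_one
       ended the TB.  */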
    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (env->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }
    
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
    
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
    
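/* The two public entry points differ only in whether the PC/opcode
   mapping is recorded for restore_state_to_opc.  */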
void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
    
void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
    
struct cpu_def_t {
    const char *name;
    int implver, amask;
};
    
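/* Table of known CPU models: each entry supplies the IMPLVER value and
   the AMASK extension bits that model advertises.  */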
static const struct cpu_def_t cpu_defs[] = {
    { "ev4",   IMPLVER_2106x, 0 },
    { "ev5",   IMPLVER_21164, 0 },
    { "ev56",  IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064", IMPLVER_2106x, 0 },
    { "21164", IMPLVER_21164, 0 },
    { "21164a", IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
    
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    AlphaCPU *cpu;
    CPUAlphaState *env;
    int implver, amask, i, max;
    
    cpu = ALPHA_CPU(object_new(TYPE_ALPHA_CPU));
    env = &cpu->env;
    
    alpha_translate_init();
    
    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);
    
    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
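    /* Note that an unrecognized cpu_model silently keeps the ev67
       defaults established above.  */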
    env->implver = implver;
    env->amask = amask;
    env->cpu_model_str = cpu_model;
    
    qemu_init_vcpu(env);
    return env;
}
    
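/* Resynchronize env->pc after a fault, using the opcode-to-PC map
   recorded when the TB was translated with search_pc set.  */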
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}