/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    uint32_t amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
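
/* Note that NO_EXIT is deliberately the first enumerator and therefore has
   the value zero; a bare "return 0" from a branch generator (see gen_bdirect
   below) is equivalent to returning NO_EXIT.  */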

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
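
/* The buffer above is sized exactly for the textual names filled in by
   alpha_translate_init below: "ir0".."ir9" are 10 names of 4 bytes each
   (including the NUL), "ir10".."ir30" are 21 names of 5 bytes, and the
   "fir" names likewise take 5 and 6 bytes:
   10*4 + 21*5 + 10*5 + 21*6 = 321 bytes in total.  */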

#include "gen-icount.h"

static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, lock_value),
                                            "lock_value");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
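
/* Note that on the system-mode path above, cpu_lock_addr is reset to -1
   whether the store succeeds or fails: a store-conditional consumes the
   lock, so a later STL_C/STQ_C without an intervening LDL_L/LDQ_L must
   not succeed.  */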

static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}
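
/* The same-page test is what keeps direct TB chaining safe here: a
   goto_tb target is patched in once at translation time, and TB
   invalidation is tracked per guest page, so a cross-page destination
   could change behind our back without this TB being flushed.  */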

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movi pc, next
             addi tmp, pc, disp
             movcond pc, cond, 0, tmp, pc
           or
             setcond tmp, cond, 0
             movi pc, next
             neg tmp, tmp
             andi tmp, tmp, disp
             add pc, pc, tmp
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
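
/* As a concrete example of the GE/LT case above: -0.0 is the bit pattern
   0x8000000000000000.  The setcond yields 0 exactly for that pattern, the
   negation turns the result into an all-zeros or all-ones mask, and the
   final AND therefore maps -0.0 to +0.0 while leaving every other value
   unchanged.  */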

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}

static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_tmp;
    int l1;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
    } else {
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
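
/* These bit positions decode the instruction qualifiers (/C, /M, /D, /U,
   /V, /I, /S) directly out of the 11-bit function field of an FP operate
   instruction; e.g. an ADDS/SUI encoding has QUAL_S, QUAL_U and QUAL_I
   all set on top of its base function code.  */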

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
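
/* ctx->tb_rm above (and ctx->tb_ftz below) cache the mode most recently
   installed within this TB, so a run of FP operations that all use the
   same qualifiers pays for the rounding-mode store only once.  */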

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
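
/* Both conversions above implement the memory-format longword layout in
   an FP register: bits <31:30> of the integer live in register bits
   <63:62> and bits <29:0> live in bits <58:29>, which is why the shift
   counts are 32 and 29 and the masks are 0xc0000000 and 0x3fffffff.  */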

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}

#define FARITH2(name)                                   \
static inline void glue(gen_f, name)(int rb, int rc)    \
{                                                       \
    if (unlikely(rc == 31)) {                           \
        return;                                         \
    }                                                   \
    if (rb != 31) {                                     \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
    } else {                                            \
        TCGv tmp = tcg_const_i64(0);                    \
        gen_helper_ ## name (cpu_fir[rc], tmp);         \
        tcg_temp_free(tmp);                             \
    }                                                   \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
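
/* The two masks select which fields of Fa are merged into Fb: just the
   sign bit (CPYS, and CPYSN with the inversion), or the sign plus the
   11-bit exponent, i.e. the top twelve bits, for CPYSE.  */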

#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
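
/* For example, zapnot_mask(0x0f) keeps the low four bytes and yields
   0x00000000ffffffff, while zapnot_mask(0x01) yields 0xff; these are
   exactly the cases gen_zapnoti special-cases as ext32u/ext8u below.  */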

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64(cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
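
/* To see that the split shift works at the boundary: for (B & 7) == 0 the
   computed count is ~0 & 63 = 63, and the extra constant shift of 1 makes
   the total 64, producing the required zero; for (B & 7) == 1 the count is
   ~8 & 63 = 55, plus 1 gives the expected 56.  */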

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)

static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}

static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
    tcg_temp_free_i32(tmp);
}
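
/* gen_rx implements the RC and RS instructions: Ra receives the old value
   of the per-CPU interrupt flag, which is then overwritten with the
   constant SET (0 for RC, 1 for RS).  */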

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
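
    /* Note the shift-left/shift-right pairs in the decode above: disp21
       and disp12 are sign-extended from 21 and 12 bits by shifting the
       field up to the top of a signed 32-bit value and arithmetic-shifting
       it back down, e.g. disp21 = (int32_t)(field << 11) >> 11.  */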

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
#ifdef CONFIG_USER_ONLY
        if (palcode == 0x9E) {
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
            break;
        } else if (palcode == 0x9F) {
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
            break;
        }
#endif
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            ret = gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
            break;
        }
#ifndef CONFIG_USER_ONLY
        if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx & 1)
                goto invalid_opc;
            ret = gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
        }
#endif
        /* Invalid PAL call */
        goto invalid_opc;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
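        /* CMPBGE illustration: each of the eight bytes of ra is compared,
           unsigned, against the corresponding byte of rb (or the literal);
           bit i of rc is set when byte i of ra >= byte i of rb.  The idiom
           "cmpbge r31, rx, ry" thus yields a nonzero ry exactly when rx
           contains a zero byte, which is how strlen-style loops scan a
           whole quadword per iteration.  */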
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                switch (ctx->env->implver) {
                case IMPLVER_2106x:
                    /* EV4, EV45, LCA, LCA45 & EV5 */
                    break;
                case IMPLVER_21164:
                case IMPLVER_21264:
                case IMPLVER_21364:
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
                                     ~(uint64_t)ctx->amask);
                    break;
                }
            }
            break;
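        /* AMASK usage note: software passes a mask of CPU feature bits and
           gets it back with the bits for implemented features cleared.
           E.g. on a BWX-capable CPU, "amask 1, r0" leaves r0 == 0.  The
           IMPLVER_2106x case above returns the mask unchanged, since those
           implementations predate the extension bits.  */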
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
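        /* IMPLVER reports the major implementation family (0 for the
           EV4/21064 class, 1 for EV5/21164, 2 for EV6/21264); here it is
           simply the constant configured in cpu_defs at the end of this
           file.  */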
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
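        /* ZAP/ZAPNOT note: bits 0-7 of the mask operand select which bytes
           of ra are zeroed (ZAP) or kept (ZAPNOT).  "zapnot ra, 0x0f, rc"
           is the usual 32-bit zero-extension, and "zapnot ra, 0x01, rc"
           the 8-bit one.  */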
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
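        /* The "& 0x3f" masking above (and in SLL/SRA below) is needed
           because the architecture uses only the low six bits of rb as the
           shift count, while TCG shift ops are undefined for counts >= 64.  */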
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
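        /* UMULH yields the upper 64 bits of the unsigned 128-bit product;
           together with MULQ it forms a full 64x64->128 multiply, used for
           example in the division-by-constant sequences compilers emit.  */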
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtf(rb, rc);
            break;
        case 0x0B:
            /* SQRTS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtg(rb, rc);
            break;
        case 0x02B:
            /* SQRTT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x1E:
            /* CVTDG */
#if 0 // TODO
            gen_fcvtdg(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2D:
            /* CVTGD */
#if 0 // TODO
            gen_fcvtgd(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
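        /* Both conversions share the 6-bit function value 0x2C; the full
           11-bit function field disambiguates them, with 0x2AC and 0x6AC
           being the CVTST encodings (the latter adding the /S software
           completion qualifier) and all other values CVTTS with its
           rounding/trapping qualifiers.  */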
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
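        /* MB and WMB can be treated as no-ops here because this translator
           runs guest code on a single emulated CPU whose memory accesses
           complete in program order, so there is no reordering to fence
           against.  */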
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            if (ra != 31)
                gen_helper_load_pcc(cpu_ir[ra]);
            break;
        case 0xE000:
            /* RC */
            gen_rx(ra, 0);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            gen_rx(ra, 1);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv tmp = tcg_const_i32(insn & 0xFF);
            gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
            tcg_temp_free(tmp);
        }
        break;
#endif
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        if (rb != 31) {
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        } else {
            tcg_gen_movi_i64(cpu_pc, 0);
        }
        if (ra != 31) {
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
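    /* The low two target bits are cleared above because Alpha PCs are
       always longword aligned; the remaining instruction bits carry only
       branch-prediction hints, which, as the comment notes, this
       translator ignores.  */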
    case 0x1B:
        /* HW_LD (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_raw(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_raw(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                 * protection checks (hw_ldl/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldl_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                 * protection checks (hw_ldq/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldq_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            }
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                else
                    tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* CTPOP */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                else
                    gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x31:
            /* PERR */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_perr(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* CTLZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                else
                    gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x33:
            /* CTTZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                else
                    gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x34:
            /* UNPKBW */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbw (rb, rc);
            break;
        case 0x35:
            /* UNPKBL */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbl (rb, rc);
            break;
        case 0x36:
            /* PKWB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pkwb (rb, rc);
            break;
        case 0x37:
            /* PKLB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pklb (rb, rc);
            break;
        case 0x38:
            /* MINSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsb8 (ra, rb, rc, islit, lit);
            break;
        case 0x39:
            /* MINSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsw4 (ra, rb, rc, islit, lit);
            break;
        case 0x3A:
            /* MINUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minub8 (ra, rb, rc, islit, lit);
            break;
        case 0x3B:
            /* MINUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minuw4 (ra, rb, rc, islit, lit);
            break;
        case 0x3C:
            /* MAXUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxub8 (ra, rb, rc, islit, lit);
            break;
        case 0x3D:
            /* MAXUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxuw4 (ra, rb, rc, islit, lit);
            break;
        case 0x3E:
            /* MAXSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsb8 (ra, rb, rc, islit, lit);
            break;
        case 0x3F:
            /* MAXSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsw4 (ra, rb, rc, islit, lit);
            break;
        case 0x70:
            /* FTOIT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv tmp1 = tcg_const_i32(insn & 0xFF);
            if (ra != 31)
                gen_helper_mtpr(tmp1, cpu_ir[ra]);
            else {
                TCGv tmp2 = tcg_const_i64(0);
                gen_helper_mtpr(tmp1, tmp2);
                tcg_temp_free(tmp2);
            }
            tcg_temp_free(tmp1);
            ret = EXIT_PC_STALE;
        }
        break;
#endif
    case 0x1E:
        /* HW_REI (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (rb == 31) {
            /* "Old" alpha */
            gen_helper_hw_rei();
        } else {
            TCGv tmp;

            if (ra != 31) {
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
            } else
                tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
            gen_helper_hw_ret(tmp);
            tcg_temp_free(tmp);
        }
        ret = EXIT_PC_UPDATED;
        break;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv addr, val;
            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            if (ra != 31)
                val = cpu_ir[ra];
            else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_raw(val, addr);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_raw(val, addr);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_raw(val, val, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_raw(val, val, addr);
                break;
            case 0x4:
                /* Longword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                break;
            case 0x5:
                /* Quadword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Invalid */
                goto invalid_opc;
            case 0x9:
                /* Invalid */
                goto invalid_opc;
            case 0xA:
                /* Invalid */
                goto invalid_opc;
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Invalid */
                goto invalid_opc;
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        break;
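    /* LDx_L/STx_C pairs implement atomic sequences: the load-locked
       opcodes above record the address and loaded value, and
       gen_store_conditional succeeds only if nothing intervened.
       A typical guest increment loop looks like:
           retry:  ldq_l  r1, 0(r2)
                   addq   r1, 1, r1
                   stq_c  r1, 0(r2)
                   beq    r1, retry
     */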
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
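    /* BR and BSR (below) are both PC-relative direct branches over a
       21-bit longword displacement (target = PC + 4 + 4*disp21); they
       differ only in that BSR conventionally writes the return address,
       which gen_bdirect already does whenever ra != 31.  */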
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
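    /* BLBC/BLBS test only bit 0 of ra, hence the final "1" argument
       asking gen_bcond to mask the register down to its low bit before
       comparing against zero; the ordinary conditional branches pass 0
       and compare the full register.  */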
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}

static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.env = env;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;
3179

    
3180
    num_insns = 0;
3181
    max_insns = tb->cflags & CF_COUNT_MASK;
3182
    if (max_insns == 0)
3183
        max_insns = CF_COUNT_MASK;
3184

    
3185
    gen_icount_start();
3186
    do {
3187
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3188
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3189
                if (bp->pc == ctx.pc) {
3190
                    gen_excp(&ctx, EXCP_DEBUG, 0);
3191
                    break;
3192
                }
3193
            }
3194
        }
3195
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                || gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (env->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}

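/* Public entry points: translate a TB without, and with, the search-PC
   bookkeeping used for exception unwinding.  */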
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

struct cpu_def_t {
    const char *name;
    int implver, amask;
};

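/* Known CPU models, mapping each name to its implementation version
   and architecture extension mask.  */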
static const struct cpu_def_t cpu_defs[] = {
    { "ev4",   IMPLVER_2106x, 0 },
    { "ev5",   IMPLVER_21164, 0 },
    { "ev56",  IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064", IMPLVER_2106x, 0 },
    { "21164", IMPLVER_21164, 0 },
    { "21164a", IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};

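/* Create and initialize a new Alpha CPU.  Model names not found in
   cpu_defs fall back to the ev67 defaults selected below.  */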
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    int implver, amask, i, max;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

    env->ps = 0x1F00;
#if defined (CONFIG_USER_ONLY)
    env->ps |= 1 << 3;
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
#endif
    env->lock_addr = -1;

    /* Initialize IPR */
#if defined (CONFIG_USER_ONLY)
    env->ipr[IPR_EXC_ADDR] = 0;
    env->ipr[IPR_EXC_SUM] = 0;
    env->ipr[IPR_EXC_MASK] = 0;
#else
    {
        // uint64_t hwpcb;
        // hwpcb = env->ipr[IPR_PCBB];
        env->ipr[IPR_ASN] = 0;
        env->ipr[IPR_ASTEN] = 0;
        env->ipr[IPR_ASTSR] = 0;
        env->ipr[IPR_DATFX] = 0;
        /* XXX: fix this */
        //    env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
        //    env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
        //    env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
        //    env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
        env->ipr[IPR_FEN] = 0;
        env->ipr[IPR_IPL] = 31;
        env->ipr[IPR_MCES] = 0;
        env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
        //    env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
        env->ipr[IPR_SISR] = 0;
        env->ipr[IPR_VIRBND] = -1ULL;
    }
#endif

    qemu_init_vcpu(env);
    return env;
}

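/* Restore the guest PC from the insn-start data recorded during a
   search-PC translation pass.  */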
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}