Statistics
| Branch: | Revision:

root / target-alpha / translate.c @ 26b46094

History | View | Annotate | Download (99.5 kB)

1
/*
2
 *  Alpha emulation cpu translation for qemu.
3
 *
4
 *  Copyright (c) 2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19

    
20
#include <stdint.h>
21
#include <stdlib.h>
22
#include <stdio.h>
23

    
24
#include "cpu.h"
25
#include "exec-all.h"
26
#include "disas.h"
27
#include "host-utils.h"
28
#include "tcg-op.h"
29
#include "qemu-common.h"
30

    
31
#include "helper.h"
32
#define GEN_HELPER 1
33
#include "helper.h"
34

    
35
#undef ALPHA_DEBUG_DISAS
36
#define CONFIG_SOFTFLOAT_INLINE
37

    
38
#ifdef ALPHA_DEBUG_DISAS
39
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40
#else
41
#  define LOG_DISAS(...) do { } while (0)
42
#endif
43

    
44
/* Per-translation-block decoder state threaded through translate_one.  */
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;  /* TB currently being generated */
    CPUAlphaState *env;
    uint64_t pc;                  /* translation-time PC; used for branch
                                     targets and as the link value */
    int mem_idx;                  /* MMU index passed to qemu_ld/st ops */
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;                 /* NOTE(review): presumably set while
                                     translating PALcode — confirm at init */
#endif
    uint32_t amask;               /* presumably the AMASK feature bits —
                                     TODO confirm against CPU init code */

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};
60

    
61
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    /* Keep translating within the current TB.  Must remain the zero
       value, per the note above.  */
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
83

    
84
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];         /* integer registers; $31 is handled as
                                   the always-zero register by callers */
static TCGv cpu_fir[31];        /* floating-point registers */
static TCGv cpu_pc;
static TCGv cpu_lock_addr;      /* address recorded by load-locked */
static TCGv cpu_lock_st_addr;   /* user-only: store-conditional address */
static TCGv cpu_lock_value;     /* value recorded by load-locked */
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;           /* mirrors CPUState 'unique' */
#endif

/* register names: backing store for "ir0".."ir30" (4 or 5 bytes each,
   NUL included) and "fir0".."fir30" (5 or 6 bytes each), filled in by
   alpha_translate_init.  */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
98

    
99
#include "gen-icount.h"
100

    
101
/* Allocate the TCG globals (env pointer, integer/fp register files, pc,
   lock state) referenced by the translator.  Idempotent: guarded by a
   one-shot flag, so repeated calls are no-ops.  */
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;   /* one-shot guard; never reset */

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* Carve each register name out of the shared static buffer; the
       increments must match the sizing of cpu_reg_names above.  */
    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, lock_value),
                                            "lock_value");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
149

    
150
static void gen_excp_1(int exception, int error_code)
151
{
152
    TCGv_i32 tmp1, tmp2;
153

    
154
    tmp1 = tcg_const_i32(exception);
155
    tmp2 = tcg_const_i32(error_code);
156
    gen_helper_excp(tmp1, tmp2);
157
    tcg_temp_free_i32(tmp2);
158
    tcg_temp_free_i32(tmp1);
159
}
160

    
161
/* Raise EXCEPTION at the current instruction: synchronize cpu_pc with
   the translation-time PC, then call the (noreturn) exception helper.  */
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}
167

    
168
/* Signal an illegal-opcode (OPCDEC) exception for the current insn.  */
static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
172

    
173
/* Load an F-format value: fetch 32 bits from memory, then expand them
   to the in-register representation via the memory_to_f helper.  */
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv raw64 = tcg_temp_new();
    TCGv_i32 raw32 = tcg_temp_new_i32();

    tcg_gen_qemu_ld32u(raw64, t1, flags);
    tcg_gen_trunc_i64_i32(raw32, raw64);
    gen_helper_memory_to_f(t0, raw32);

    tcg_temp_free_i32(raw32);
    tcg_temp_free(raw64);
}
183

    
184
/* Load a G-format value: fetch 64 bits, then convert to the register
   representation via the memory_to_g helper.  */
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv raw = tcg_temp_new();

    tcg_gen_qemu_ld64(raw, t1, flags);
    gen_helper_memory_to_g(t0, raw);

    tcg_temp_free(raw);
}
191

    
192
/* Load an S-format (single) value: fetch 32 bits from memory, then
   expand to the in-register representation via memory_to_s.  */
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv raw64 = tcg_temp_new();
    TCGv_i32 raw32 = tcg_temp_new_i32();

    tcg_gen_qemu_ld32u(raw64, t1, flags);
    tcg_gen_trunc_i64_i32(raw32, raw64);
    gen_helper_memory_to_s(t0, raw32);

    tcg_temp_free_i32(raw32);
    tcg_temp_free(raw64);
}
202

    
203
/* Load-locked longword: a normal 32-bit sign-extending load that also
   records the address and the loaded value for a later
   store-conditional (see gen_store_conditional).  */
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
209

    
210
/* Load-locked quadword: 64-bit load that also records address and value
   for a later store-conditional.  */
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
216

    
217
/* Common code for loads: compute the effective address Rb + disp16
   (clearing the low three bits when CLEAR is set, as for LDQ_U) and
   load into Ra; FP selects the fp register file.  */
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        /* Base register $31: the effective address is just disp16.  */
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
250

    
251
/* Store an F-format value: convert the register representation to the
   32-bit memory format, then store it.  */
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv widened = tcg_temp_new();
    TCGv_i32 mem32 = tcg_temp_new_i32();

    gen_helper_f_to_memory(mem32, t0);
    tcg_gen_extu_i32_i64(widened, mem32);
    tcg_gen_qemu_st32(widened, t1, flags);

    tcg_temp_free_i32(mem32);
    tcg_temp_free(widened);
}
261

    
262
/* Store a G-format value: convert to the 64-bit memory format via the
   g_to_memory helper, then store it.  */
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv mem64 = tcg_temp_new();

    gen_helper_g_to_memory(mem64, t0);
    tcg_gen_qemu_st64(mem64, t1, flags);

    tcg_temp_free(mem64);
}
269

    
270
/* Store an S-format (single) value: convert the register representation
   to the 32-bit memory format, then store it.  */
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv widened = tcg_temp_new();
    TCGv_i32 mem32 = tcg_temp_new_i32();

    gen_helper_s_to_memory(mem32, t0);
    tcg_gen_extu_i32_i64(widened, mem32);
    tcg_gen_qemu_st32(widened, t1, flags);

    tcg_temp_free_i32(mem32);
    tcg_temp_free(widened);
}
280

    
281
/* Common code for stores: compute the effective address Rb + disp16
   (clearing the low three bits when CLEAR is set) and store Ra there;
   a $31 source stores zero.  FP selects the fp register file.  */
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        /* Base register $31: the effective address is just disp16.  */
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        /* $31 reads as zero; materialize a constant for the store.  */
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    /* Only the constant case allocated a temporary of our own.  */
    if (ra == 31) {
        tcg_temp_free(va);
    }
}
314

    
315
/* STL_C/STQ_C: store Ra to Rb+disp16 provided the lock established by a
   previous load-locked is still intact.  Ra is overwritten with 1 on
   success, 0 on failure.  QUAD selects 64- vs 32-bit width.  */
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    /* Record the address in the CPU state for cpu_loop's CAS fallback.  */
    addr = cpu_lock_st_addr;
#else
    /* Local temp: it must survive the branches emitted below.  */
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        /* Fail if the address differs from the load-locked address.  */
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        /* Fail if memory no longer holds the value the lock saw.  */
        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        /* Success path: perform the store and set Ra = 1.  */
        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        /* The lock is consumed either way.  */
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
381

    
382
/* Decide whether a direct-chained goto_tb may be used for a branch to
   DEST.  Disallowed across a page boundary from the TB start, while
   single-stepping, and when the TB's last insn touches I/O.  */
static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    if (ctx->env->singlestep_enabled) {
        return 0;
    }
    if (ctx->tb->cflags & CF_LAST_IO) {
        return 0;
    }
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
390

    
391
/* Unconditional direct branch (BR/BSR): optionally link the current PC
   into Ra, then transfer to PC + 4*disp.  Returns the TB exit status.

   Fixes vs the original: use NO_EXIT instead of a bare 0 (consistent
   with the rest of the file), and compute the displacement with a
   multiply — left-shifting a negative int32_t (disp << 2) is undefined
   behavior in C.  */
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (int64_t)disp * 4;

    if (ra != 31) {
        /* Link: Ra receives the translation-time PC.  */
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return NO_EXIT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
412

    
413
/* Emit a conditional branch to PC + 4*disp, taken when CMP <cond> 0.
   Uses a goto_tb pair when direct chaining is allowed, otherwise a
   branch diamond that leaves cpu_pc updated.  */
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        /* Not-taken arm: chain to the fallthrough TB.  */
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        /* Taken arm: chain to the destination TB.  */
        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movi pc, next
             addi tmp, pc, disp
             movcond pc, cond, 0, tmp, pc
           or
             setcond tmp, cond, 0
             movi pc, next
             neg tmp, tmp
             andi tmp, tmp, disp
             add pc, pc, tmp
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}
457

    
458
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
459
                            int32_t disp, int mask)
460
{
461
    TCGv cmp_tmp;
462

    
463
    if (unlikely(ra == 31)) {
464
        cmp_tmp = tcg_const_i64(0);
465
    } else {
466
        cmp_tmp = tcg_temp_new();
467
        if (mask) {
468
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
469
        } else {
470
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
471
        }
472
    }
473

    
474
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
475
}
476

    
477
/* Fold -0.0 for comparison with COND.  */
478

    
479
/* Adjust SRC into DEST so that an integer comparison of DEST with zero
   under COND gives the IEEE-correct answer for the value -0.0.  */
static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;   /* bit pattern of -0.0 */

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        /* Only the six ordered comparisons above are meaningful here.  */
        abort();
    }
}
508

    
509
/* Floating-point conditional branch: fold -0.0 in F(Ra) for COND, then
   branch via the shared integer comparison path.  */
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
524

    
525
/* Integer conditional move: Rc = (lit or Rb) when Ra <cond> 0; MASK
   restricts the test to Ra's low bit (CMOVLBC/CMOVLBS).  */
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int skip;

    if (unlikely(rc == 31)) {
        return;
    }

    skip = gen_new_label();

    if (ra == 31) {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv zero = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, zero, 0, skip);
        tcg_temp_free(zero);
    } else if (mask) {
        TCGv lowbit = tcg_temp_new();
        tcg_gen_andi_i64(lowbit, cpu_ir[ra], 1);
        tcg_gen_brcondi_i64(inv_cond, lowbit, 0, skip);
        tcg_temp_free(lowbit);
    } else {
        tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, skip);
    }

    if (islit) {
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    } else {
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    }
    gen_set_label(skip);
}
557

    
558
/* Floating-point conditional move: F(Rc) = F(Rb) (or zero for $31)
   when F(Ra) <cond> 0, with -0.0 folded for the comparison.  */
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_val;
    int skip;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_val = tcg_temp_new();
    if (ra == 31) {
        tcg_gen_movi_i64(cmp_val, 0);
    } else {
        gen_fold_mzero(cond, cmp_val, cpu_fir[ra]);
    }

    skip = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_val, 0, skip);
    tcg_temp_free(cmp_val);

    if (rb == 31) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    }
    gen_set_label(skip);
}
584

    
585
#define QUAL_RM_N       0x080   /* Round mode nearest even */
586
#define QUAL_RM_C       0x000   /* Round mode chopped */
587
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
588
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
589
#define QUAL_RM_MASK    0x0c0
590

    
591
#define QUAL_U          0x100   /* Underflow enable (fp output) */
592
#define QUAL_V          0x100   /* Overflow enable (int output) */
593
#define QUAL_S          0x400   /* Software completion enable */
594
#define QUAL_I          0x200   /* Inexact detection enable */
595

    
596
/* Switch the softfloat rounding mode to match the /RM qualifier bits of
   FN11.  The store is skipped when the TB already set this mode.  */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;                 /* already in effect for this TB */
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    /* fn11 is masked to QUAL_RM_MASK, so the four cases are exhaustive.  */
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        /* Dynamic rounding: take the mode from the FPCR at run time.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
634

    
635
/* Set the softfloat flush-to-zero flag according to the /U qualifier of
   FN11, skipping the store when the TB already set this state.  */
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;                 /* already in effect for this TB */
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
663

    
664
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
665
{
666
    TCGv val = tcg_temp_new();
667
    if (reg == 31) {
668
        tcg_gen_movi_i64(val, 0);
669
    } else if (fn11 & QUAL_S) {
670
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
671
    } else if (is_cmp) {
672
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
673
    } else {
674
        gen_helper_ieee_input(val, cpu_fir[reg]);
675
    }
676
    return val;
677
}
678

    
679
/* Clear the accumulated softfloat exception flags before an fp op.  */
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* Store a zero byte directly into fp_status rather than calling out.  */
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}
690

    
691
/* Read the softfloat exception flags, mask off IGNORE, and call the
   exception-raising helper (/S variant when software completion was
   requested) for destination register RC.  */
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}
728

    
729
/* Raise pending fp exceptions for RC; inexact is ignored unless the
   /I qualifier was present in FN11.  */
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
733

    
734
/* CVTLQ: rearrange the longword-in-fp-register layout of F(Rb) into a
   sign-extended quadword in F(Rc), without touching fp state.  */
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
755

    
756
/* CVTQL (no overflow check): the inverse bit rearrangement of CVTLQ —
   spread the low 32 bits of F(Rb) into the fp longword layout.  */
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* Split bits 31:30 from bits 29:0 and reposition each field.  */
        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
775

    
776
/* CVTQL/V: as gen_fcvtql, but first raise an integer-overflow
   arithmetic trap when F(Rb) does not fit in 32 bits.  */
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        /* In range iff the value equals its own 32-bit sign extension.  */
        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
790

    
791
/* Emit gen_f<name>(rb, rc): call the one-operand fp helper with F(Rb)
   ($31 reads as zero) into F(Rc) ($31 discards the result).  */
#define FARITH2(name)                                   \
static inline void glue(gen_f, name)(int rb, int rc)    \
{                                                       \
    if (unlikely(rc == 31)) {                           \
        return;                                         \
    }                                                   \
    if (rb != 31) {                                     \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
    } else {                                                \
        TCGv tmp = tcg_const_i64(0);                    \
        gen_helper_ ## name (cpu_fir[rc], tmp);         \
        tcg_temp_free(tmp);                             \
    }                                                   \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

    
814
/* Common code for one-operand IEEE ops: apply the /RM and /U qualifiers,
   clear fp flags, run HELPER on a checked input, and raise any resulting
   exceptions per the qualifiers.  */
static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
835

    
836
/* Emit gen_f<name>(ctx, rb, rc, fn11) as a thin wrapper around
   gen_ieee_arith2 bound to the matching helper.  */
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
846

    
847
/* CVTTQ: T-float to quadword conversion, dispatching on the qualifier
   combination to the specialized helpers where possible.  */
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        /* No /I: suppress the inexact flag from the svic helper.  */
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        /* General case: honor the rounding mode; mask flags whose
           qualifiers were absent.  */
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
886

    
887
/* Common code for integer-to-float conversions: apply /RM, then run
   HELPER; exception bookkeeping is only needed when /I is present.  */
static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    /* vb aliases the register directly except for $31; track that so the
       temporary is only freed in the constant case.  */
    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}
921

    
922
/* Emit gen_f<name>(ctx, rb, rc, fn11) as a thin wrapper around
   gen_ieee_intcvt bound to the matching helper.  */
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
930

    
931
/* Common code for CPYS/CPYSN/CPYSE:
     F(Rc) = (F(Ra) & MASK, complemented when INV_A) | (F(Rb) & ~MASK).
   za/zb record operands statically known to be zero ($31) so the final
   combine can be simplified.  */
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            /* ~0 & mask == mask: reuse vmask; it is freed once below.  */
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    /* Combine, skipping statically-zero halves.  */
    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}
990

    
991
/* CPYS: combine the sign bit (bit 63) of FPR[ra] with the remaining
   bits of FPR[rb] into FPR[rc].  */
static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}
995

    
996
/* CPYSN: like CPYS, but the sign bit taken from FPR[ra] is
   complemented first.  */
static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}
1000

    
1001
/* CPYSE: combine the sign and exponent field (bits 63:52) of FPR[ra]
   with the fraction bits of FPR[rb] into FPR[rc].  */
static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
1005

    
1006
/* Expand to gen_fNAME emitting a direct three-operand FP helper call:
   FPR[rc] = helper(FPR[ra], FPR[rb]).  f31 reads as zero (materialized
   in a constant temp); a write to f31 drops the insn entirely, which
   also suppresses any exception the helper would have raised.  */
#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
1047

    
1048
/* Emit an IEEE three-operand arithmetic operation
   FPR[rc] = helper(FPR[ra], FPR[rb]), applying the rounding-mode and
   flush-to-zero qualifiers encoded in fn11 and raising any enabled FP
   exceptions afterward.  */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    /* Clear accumulated FP status so gen_fp_exc_raise below sees only
       the flags produced by this operation.  */
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
1072

    
1073
/* Expand to a gen_fNAME wrapper around gen_ieee_arith3 for the IEEE
   add/sub/mul/div operations in S and T formats.  */
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
1087

    
1088
/* Emit an IEEE comparison FPR[rc] = helper(FPR[ra], FPR[rb]).  Unlike
   gen_ieee_arith3, no rounding-mode or flush-to-zero qualifier is
   applied; note also the final argument to gen_ieee_input is 1 here
   rather than 0 (presumably selecting comparison-style NaN handling --
   confirm against gen_ieee_input).  */
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
1110

    
1111
/* Expand to a gen_fNAME wrapper around gen_ieee_compare for the
   T-format comparison operations.  */
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
1121

    
1122
/* Expand an 8-bit ZAPNOT byte selector into the corresponding 64-bit
   mask: bit i of LIT set means byte lane i of the result is 0xff.  */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    /* Build the mask from the most significant byte down, shifting in
       a full byte of ones for each selected lane.  */
    for (i = 7; i >= 0; --i) {
        mask <<= 8;
        if ((lit >> i) & 1) {
            mask |= 0xff;
        }
    }
    return mask;
}
1133

    
1134
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        /* No bytes selected: result is zero.  */
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        /* Low byte only: zero-extend from 8 bits.  */
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        /* Low two bytes: zero-extend from 16 bits.  */
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        /* Low four bytes: zero-extend from 32 bits.  */
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        /* All bytes selected: plain move.  */
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        /* General case: AND with the expanded byte mask.  */
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}
1160

    
1161
/* ZAPNOT: keep the bytes of IR[ra] selected by the low 8 bits of the
   second operand, zeroing the rest, into IR[rc].  The literal form is
   expanded inline; the register form calls the helper.  */
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    } else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    } else {
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
    }
}
1172

    
1173
/* ZAP: clear the bytes of IR[ra] selected by the low 8 bits of the
   second operand, into IR[rc].  The literal form complements the mask
   and reuses the zapnot expansion.  */
static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    } else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    } else {
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
    }
}
1184

    
1185

    
1186
/* EXTWH, EXTLH, EXTQH */
/* Extract high: left-shift IR[ra] by (64 - 8 * (addr & 7)) mod 64 bits
   and then zap down to the width selected by byte_mask.  */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            /* (64 - 8*(lit & 7)) & 63 -- yields shift 0 when lit&7 == 0.  */
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            /* Compute the same count dynamically: (-8*(rb & 7)) & 63.  */
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
1210

    
1211
/* EXTBL, EXTWL, EXTLL, EXTQL */
/* Extract low: right-shift IR[ra] by 8 * (addr & 7) bits and then zap
   down to the width selected by byte_mask.  */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            /* Shift count is at most 56, so no shift-by-64 workaround
               is needed here (contrast gen_ext_h).  */
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
1232

    
1233
/* INSWH, INSLH, INSQH */
/* Insert high: position the zapped bytes of IR[ra] so that the part of
   the datum crossing above a quadword boundary lands in IR[rc].  */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
1274

    
1275
/* INSBL, INSWL, INSLL, INSQL */
/* Insert low: shift the zapped bytes of IR[ra] left by 8 * (addr & 7)
   bits into IR[rc].  */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            /* Shift count is at most 56; no shift-by-64 hazard here.  */
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
1304

    
1305
/* MSKWH, MSKLH, MSKQH */
/* Mask high: clear the bytes of IR[ra] that the corresponding insert-
   high would have written, i.e. the part of byte_mask that wraps past
   the quadword boundary.  */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        /* Shift the selector left by (lit & 7) and keep bits <15:8>,
           then zap the complement -- computed at translate time.  */
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
1342

    
1343
/* MSKBL, MSKWL, MSKLL, MSKQL */
/* Mask low: clear the bytes of IR[ra] selected by byte_mask shifted
   left by (addr & 7) byte positions.  */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        /* Literal form: the zap selector is known at translate time.  */
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* Expand the byte selector to a 64-bit mask and shift it into
           place; shift count is at most 56, so no 64-bit-shift hazard.  */
        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
1368

    
1369
/* Code to call arith3 helpers */
/* Expand to gen_NAME: IR[rc] = helper(IR[ra], IR[rb]).  r31 as a source
   reads as zero (materialized in a constant temp); the islit form
   substitutes the 8-bit literal for rb; a write to r31 drops the insn
   entirely, including any exception the helper would have raised.  */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
1412

    
1413
/* Expand to gen_NAME for the two-operand MVI pack/unpack operations:
   IR[rc] = helper(IR[rb]).  r31 reads as zero; a write to r31 drops
   the insn.  */
#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
1427

    
1428
/* Integer compare: IR[rc] = (IR[ra] COND op2) ? 1 : 0, where op2 is
   the 8-bit literal when islit, else IR[rb].  r31 reads as zero; a
   write to r31 drops the insn.  */
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv lhs, rhs;

    if (unlikely(rc == 31)) {
        return;
    }

    /* Materialize constants only where needed; otherwise reference the
       register file directly.  */
    lhs = (ra == 31 ? tcg_const_i64(0) : cpu_ir[ra]);
    rhs = (islit ? tcg_const_i64(lit) : cpu_ir[rb]);

    tcg_gen_setcond_i64(cond, cpu_ir[rc], lhs, rhs);

    /* Release only the temporaries we created above.  */
    if (ra == 31) {
        tcg_temp_free(lhs);
    }
    if (islit) {
        tcg_temp_free(rhs);
    }
}
1457

    
1458
static void gen_rx(int ra, int set)
1459
{
1460
    TCGv_i32 tmp;
1461

    
1462
    if (ra != 31) {
1463
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
1464
    }
1465

    
1466
    tmp = tcg_const_i32(set);
1467
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
1468
    tcg_temp_free_i32(tmp);
1469
}
1470

    
1471
#ifndef CONFIG_USER_ONLY
1472

    
1473
/* Flags OR'd into the offset returned by cpu_pr_data to mark fields
   narrower than 64 bits.  */
#define PR_BYTE         0x100000
#define PR_LONG         0x200000

/* Map an Alpha processor-register number to the offset of its backing
   field within CPUAlphaState, tagged with PR_BYTE/PR_LONG for 8-bit
   and 32-bit fields (64-bit otherwise).  Returns 0 for unimplemented
   registers, which callers treat as read-zero/write-ignore.  */
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);
    }
    return 0;
}
1500

    
1501
/* HW_MFPR: copy processor register REGNO into IR[ra], using the field
   width encoded by cpu_pr_data.  */
static void gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return;
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        /* 32-bit fields load sign-extended into the 64-bit register.  */
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
}
1523

    
1524
/* HW_MTPR: store IR[rb] (r31 reads as zero) into processor register
   REGNO, using the field width encoded by cpu_pr_data.  */
static void gen_mtpr(int rb, int regno)
{
    TCGv tmp;
    int data;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    data = cpu_pr_data(regno);
    if (data != 0) {
        if (data & PR_BYTE) {
            tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
        } else if (data & PR_LONG) {
            tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_st_i64(tmp, cpu_env, data);
        }
    }

    /* Free the zero constant created for the r31 case.  */
    if (rb == 31) {
        tcg_temp_free(tmp);
    }
}
1552
#endif /* !USER_ONLY*/
1553

    
1554
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
1555
{
1556
    uint32_t palcode;
1557
    int32_t disp21, disp16, disp12;
1558
    uint16_t fn11;
1559
    uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
1560
    uint8_t lit;
1561
    ExitStatus ret;
1562

    
1563
    /* Decode all instruction fields */
1564
    opc = insn >> 26;
1565
    ra = (insn >> 21) & 0x1F;
1566
    rb = (insn >> 16) & 0x1F;
1567
    rc = insn & 0x1F;
1568
    real_islit = islit = (insn >> 12) & 1;
1569
    if (rb == 31 && !islit) {
1570
        islit = 1;
1571
        lit = 0;
1572
    } else
1573
        lit = (insn >> 13) & 0xFF;
1574
    palcode = insn & 0x03FFFFFF;
1575
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1576
    disp16 = (int16_t)(insn & 0x0000FFFF);
1577
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
1578
    fn11 = (insn >> 5) & 0x000007FF;
1579
    fpfn = fn11 & 0x3F;
1580
    fn7 = (insn >> 5) & 0x0000007F;
1581
    fn2 = (insn >> 5) & 0x00000003;
1582
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
1583
              opc, ra, rb, rc, disp16);
1584

    
1585
    ret = NO_EXIT;
1586
    switch (opc) {
1587
    case 0x00:
1588
        /* CALL_PAL */
1589
#ifdef CONFIG_USER_ONLY
1590
        if (palcode == 0x9E) {
1591
            /* RDUNIQUE */
1592
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
1593
            break;
1594
        } else if (palcode == 0x9F) {
1595
            /* WRUNIQUE */
1596
            tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
1597
            break;
1598
        }
1599
#endif
1600
        if (palcode >= 0x80 && palcode < 0xC0) {
1601
            /* Unprivileged PAL call */
1602
            ret = gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xBF);
1603
            break;
1604
        }
1605
#ifndef CONFIG_USER_ONLY
1606
        if (palcode < 0x40) {
1607
            /* Privileged PAL code */
1608
            if (ctx->mem_idx != MMU_KERNEL_IDX) {
1609
                goto invalid_opc;
1610
            }
1611
            ret = gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3F);
1612
        }
1613
#endif
1614
        /* Invalid PAL call */
1615
        goto invalid_opc;
1616
    case 0x01:
1617
        /* OPC01 */
1618
        goto invalid_opc;
1619
    case 0x02:
1620
        /* OPC02 */
1621
        goto invalid_opc;
1622
    case 0x03:
1623
        /* OPC03 */
1624
        goto invalid_opc;
1625
    case 0x04:
1626
        /* OPC04 */
1627
        goto invalid_opc;
1628
    case 0x05:
1629
        /* OPC05 */
1630
        goto invalid_opc;
1631
    case 0x06:
1632
        /* OPC06 */
1633
        goto invalid_opc;
1634
    case 0x07:
1635
        /* OPC07 */
1636
        goto invalid_opc;
1637
    case 0x08:
1638
        /* LDA */
1639
        if (likely(ra != 31)) {
1640
            if (rb != 31)
1641
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1642
            else
1643
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
1644
        }
1645
        break;
1646
    case 0x09:
1647
        /* LDAH */
1648
        if (likely(ra != 31)) {
1649
            if (rb != 31)
1650
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1651
            else
1652
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
1653
        }
1654
        break;
1655
    case 0x0A:
1656
        /* LDBU */
1657
        if (!(ctx->amask & AMASK_BWX))
1658
            goto invalid_opc;
1659
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1660
        break;
1661
    case 0x0B:
1662
        /* LDQ_U */
1663
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1664
        break;
1665
    case 0x0C:
1666
        /* LDWU */
1667
        if (!(ctx->amask & AMASK_BWX))
1668
            goto invalid_opc;
1669
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1670
        break;
1671
    case 0x0D:
1672
        /* STW */
1673
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1674
        break;
1675
    case 0x0E:
1676
        /* STB */
1677
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1678
        break;
1679
    case 0x0F:
1680
        /* STQ_U */
1681
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1682
        break;
1683
    case 0x10:
1684
        switch (fn7) {
1685
        case 0x00:
1686
            /* ADDL */
1687
            if (likely(rc != 31)) {
1688
                if (ra != 31) {
1689
                    if (islit) {
1690
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1691
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1692
                    } else {
1693
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1694
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1695
                    }
1696
                } else {
1697
                    if (islit)
1698
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
1699
                    else
1700
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1701
                }
1702
            }
1703
            break;
1704
        case 0x02:
1705
            /* S4ADDL */
1706
            if (likely(rc != 31)) {
1707
                if (ra != 31) {
1708
                    TCGv tmp = tcg_temp_new();
1709
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1710
                    if (islit)
1711
                        tcg_gen_addi_i64(tmp, tmp, lit);
1712
                    else
1713
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1714
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1715
                    tcg_temp_free(tmp);
1716
                } else {
1717
                    if (islit)
1718
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
1719
                    else
1720
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1721
                }
1722
            }
1723
            break;
1724
        case 0x09:
1725
            /* SUBL */
1726
            if (likely(rc != 31)) {
1727
                if (ra != 31) {
1728
                    if (islit)
1729
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1730
                    else
1731
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1732
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1733
                } else {
1734
                    if (islit)
1735
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
1736
                    else {
1737
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1738
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1739
                }
1740
            }
1741
            break;
1742
        case 0x0B:
1743
            /* S4SUBL */
1744
            if (likely(rc != 31)) {
1745
                if (ra != 31) {
1746
                    TCGv tmp = tcg_temp_new();
1747
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1748
                    if (islit)
1749
                        tcg_gen_subi_i64(tmp, tmp, lit);
1750
                    else
1751
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1752
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1753
                    tcg_temp_free(tmp);
1754
                } else {
1755
                    if (islit)
1756
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
1757
                    else {
1758
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1759
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1760
                    }
1761
                }
1762
            }
1763
            break;
1764
        case 0x0F:
1765
            /* CMPBGE */
1766
            gen_cmpbge(ra, rb, rc, islit, lit);
1767
            break;
1768
        case 0x12:
1769
            /* S8ADDL */
1770
            if (likely(rc != 31)) {
1771
                if (ra != 31) {
1772
                    TCGv tmp = tcg_temp_new();
1773
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1774
                    if (islit)
1775
                        tcg_gen_addi_i64(tmp, tmp, lit);
1776
                    else
1777
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1778
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1779
                    tcg_temp_free(tmp);
1780
                } else {
1781
                    if (islit)
1782
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
1783
                    else
1784
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1785
                }
1786
            }
1787
            break;
1788
        case 0x1B:
1789
            /* S8SUBL */
1790
            if (likely(rc != 31)) {
1791
                if (ra != 31) {
1792
                    TCGv tmp = tcg_temp_new();
1793
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1794
                    if (islit)
1795
                        tcg_gen_subi_i64(tmp, tmp, lit);
1796
                    else
1797
                       tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1798
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1799
                    tcg_temp_free(tmp);
1800
                } else {
1801
                    if (islit)
1802
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
1803
                    else
1804
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1805
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1806
                    }
1807
                }
1808
            }
1809
            break;
1810
        case 0x1D:
1811
            /* CMPULT */
1812
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
1813
            break;
1814
        case 0x20:
1815
            /* ADDQ */
1816
            if (likely(rc != 31)) {
1817
                if (ra != 31) {
1818
                    if (islit)
1819
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1820
                    else
1821
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1822
                } else {
1823
                    if (islit)
1824
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
1825
                    else
1826
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1827
                }
1828
            }
1829
            break;
1830
        case 0x22:
1831
            /* S4ADDQ */
1832
            if (likely(rc != 31)) {
1833
                if (ra != 31) {
1834
                    TCGv tmp = tcg_temp_new();
1835
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1836
                    if (islit)
1837
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1838
                    else
1839
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1840
                    tcg_temp_free(tmp);
1841
                } else {
1842
                    if (islit)
1843
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
1844
                    else
1845
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1846
                }
1847
            }
1848
            break;
1849
        case 0x29:
1850
            /* SUBQ */
1851
            if (likely(rc != 31)) {
1852
                if (ra != 31) {
1853
                    if (islit)
1854
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1855
                    else
1856
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1857
                } else {
1858
                    if (islit)
1859
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
1860
                    else
1861
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1862
                }
1863
            }
1864
            break;
1865
        case 0x2B:
1866
            /* S4SUBQ */
1867
            if (likely(rc != 31)) {
1868
                if (ra != 31) {
1869
                    TCGv tmp = tcg_temp_new();
1870
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1871
                    if (islit)
1872
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1873
                    else
1874
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1875
                    tcg_temp_free(tmp);
1876
                } else {
1877
                    if (islit)
1878
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
1879
                    else
1880
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1881
                }
1882
            }
1883
            break;
1884
        case 0x2D:
1885
            /* CMPEQ */
1886
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
1887
            break;
1888
        case 0x32:
1889
            /* S8ADDQ */
1890
            if (likely(rc != 31)) {
1891
                if (ra != 31) {
1892
                    TCGv tmp = tcg_temp_new();
1893
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1894
                    if (islit)
1895
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1896
                    else
1897
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1898
                    tcg_temp_free(tmp);
1899
                } else {
1900
                    if (islit)
1901
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
1902
                    else
1903
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1904
                }
1905
            }
1906
            break;
1907
        case 0x3B:
1908
            /* S8SUBQ */
1909
            if (likely(rc != 31)) {
1910
                if (ra != 31) {
1911
                    TCGv tmp = tcg_temp_new();
1912
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1913
                    if (islit)
1914
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1915
                    else
1916
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1917
                    tcg_temp_free(tmp);
1918
                } else {
1919
                    if (islit)
1920
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
1921
                    else
1922
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1923
                }
1924
            }
1925
            break;
1926
        case 0x3D:
1927
            /* CMPULE */
1928
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
1929
            break;
1930
        case 0x40:
1931
            /* ADDL/V */
1932
            gen_addlv(ra, rb, rc, islit, lit);
1933
            break;
1934
        case 0x49:
1935
            /* SUBL/V */
1936
            gen_sublv(ra, rb, rc, islit, lit);
1937
            break;
1938
        case 0x4D:
1939
            /* CMPLT */
1940
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
1941
            break;
1942
        case 0x60:
1943
            /* ADDQ/V */
1944
            gen_addqv(ra, rb, rc, islit, lit);
1945
            break;
1946
        case 0x69:
1947
            /* SUBQ/V */
1948
            gen_subqv(ra, rb, rc, islit, lit);
1949
            break;
1950
        case 0x6D:
1951
            /* CMPLE */
1952
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
1953
            break;
1954
        default:
1955
            goto invalid_opc;
1956
        }
1957
        break;
1958
    case 0x11:
1959
        switch (fn7) {
1960
        case 0x00:
1961
            /* AND */
1962
            if (likely(rc != 31)) {
1963
                if (ra == 31)
1964
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
1965
                else if (islit)
1966
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1967
                else
1968
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1969
            }
1970
            break;
1971
        case 0x08:
1972
            /* BIC */
1973
            if (likely(rc != 31)) {
1974
                if (ra != 31) {
1975
                    if (islit)
1976
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1977
                    else
1978
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1979
                } else
1980
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
1981
            }
1982
            break;
1983
        case 0x14:
1984
            /* CMOVLBS */
1985
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
1986
            break;
1987
        case 0x16:
1988
            /* CMOVLBC */
1989
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
1990
            break;
1991
        case 0x20:
1992
            /* BIS */
1993
            if (likely(rc != 31)) {
1994
                if (ra != 31) {
1995
                    if (islit)
1996
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1997
                    else
1998
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1999
                } else {
2000
                    if (islit)
2001
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
2002
                    else
2003
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2004
                }
2005
            }
2006
            break;
2007
        case 0x24:
2008
            /* CMOVEQ */
2009
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
2010
            break;
2011
        case 0x26:
2012
            /* CMOVNE */
2013
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
2014
            break;
2015
        case 0x28:
2016
            /* ORNOT */
2017
            if (likely(rc != 31)) {
2018
                if (ra != 31) {
2019
                    if (islit)
2020
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2021
                    else
2022
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2023
                } else {
2024
                    if (islit)
2025
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2026
                    else
2027
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2028
                }
2029
            }
2030
            break;
2031
        case 0x40:
2032
            /* XOR */
2033
            if (likely(rc != 31)) {
2034
                if (ra != 31) {
2035
                    if (islit)
2036
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2037
                    else
2038
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2039
                } else {
2040
                    if (islit)
2041
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
2042
                    else
2043
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2044
                }
2045
            }
2046
            break;
2047
        case 0x44:
2048
            /* CMOVLT */
2049
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
2050
            break;
2051
        case 0x46:
2052
            /* CMOVGE */
2053
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
2054
            break;
2055
        case 0x48:
2056
            /* EQV */
2057
            if (likely(rc != 31)) {
2058
                if (ra != 31) {
2059
                    if (islit)
2060
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2061
                    else
2062
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2063
                } else {
2064
                    if (islit)
2065
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2066
                    else
2067
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2068
                }
2069
            }
2070
            break;
2071
        case 0x61:
2072
            /* AMASK */
2073
            if (likely(rc != 31)) {
2074
                if (islit)
2075
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
2076
                else
2077
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2078
                switch (ctx->env->implver) {
2079
                case IMPLVER_2106x:
2080
                    /* EV4, EV45, LCA, LCA45 & EV5 */
2081
                    break;
2082
                case IMPLVER_21164:
2083
                case IMPLVER_21264:
2084
                case IMPLVER_21364:
2085
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
2086
                                     ~(uint64_t)ctx->amask);
2087
                    break;
2088
                }
2089
            }
2090
            break;
2091
        case 0x64:
2092
            /* CMOVLE */
2093
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
2094
            break;
2095
        case 0x66:
2096
            /* CMOVGT */
2097
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
2098
            break;
2099
        case 0x6C:
2100
            /* IMPLVER */
2101
            if (rc != 31)
2102
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
2103
            break;
2104
        default:
2105
            goto invalid_opc;
2106
        }
2107
        break;
2108
    case 0x12:
2109
        switch (fn7) {
2110
        case 0x02:
2111
            /* MSKBL */
2112
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
2113
            break;
2114
        case 0x06:
2115
            /* EXTBL */
2116
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
2117
            break;
2118
        case 0x0B:
2119
            /* INSBL */
2120
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
2121
            break;
2122
        case 0x12:
2123
            /* MSKWL */
2124
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
2125
            break;
2126
        case 0x16:
2127
            /* EXTWL */
2128
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
2129
            break;
2130
        case 0x1B:
2131
            /* INSWL */
2132
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
2133
            break;
2134
        case 0x22:
2135
            /* MSKLL */
2136
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
2137
            break;
2138
        case 0x26:
2139
            /* EXTLL */
2140
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
2141
            break;
2142
        case 0x2B:
2143
            /* INSLL */
2144
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
2145
            break;
2146
        case 0x30:
2147
            /* ZAP */
2148
            gen_zap(ra, rb, rc, islit, lit);
2149
            break;
2150
        case 0x31:
2151
            /* ZAPNOT */
2152
            gen_zapnot(ra, rb, rc, islit, lit);
2153
            break;
2154
        case 0x32:
2155
            /* MSKQL */
2156
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
2157
            break;
2158
        case 0x34:
2159
            /* SRL */
2160
            if (likely(rc != 31)) {
2161
                if (ra != 31) {
2162
                    if (islit)
2163
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2164
                    else {
2165
                        TCGv shift = tcg_temp_new();
2166
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2167
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2168
                        tcg_temp_free(shift);
2169
                    }
2170
                } else
2171
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
2172
            }
2173
            break;
2174
        case 0x36:
2175
            /* EXTQL */
2176
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
2177
            break;
2178
        case 0x39:
2179
            /* SLL */
2180
            if (likely(rc != 31)) {
2181
                if (ra != 31) {
2182
                    if (islit)
2183
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2184
                    else {
2185
                        TCGv shift = tcg_temp_new();
2186
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2187
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2188
                        tcg_temp_free(shift);
2189
                    }
2190
                } else
2191
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
2192
            }
2193
            break;
2194
        case 0x3B:
2195
            /* INSQL */
2196
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
2197
            break;
2198
        case 0x3C:
2199
            /* SRA */
2200
            if (likely(rc != 31)) {
2201
                if (ra != 31) {
2202
                    if (islit)
2203
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2204
                    else {
2205
                        TCGv shift = tcg_temp_new();
2206
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2207
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2208
                        tcg_temp_free(shift);
2209
                    }
2210
                } else
2211
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
2212
            }
2213
            break;
2214
        case 0x52:
2215
            /* MSKWH */
2216
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
2217
            break;
2218
        case 0x57:
2219
            /* INSWH */
2220
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
2221
            break;
2222
        case 0x5A:
2223
            /* EXTWH */
2224
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
2225
            break;
2226
        case 0x62:
2227
            /* MSKLH */
2228
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
2229
            break;
2230
        case 0x67:
2231
            /* INSLH */
2232
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
2233
            break;
2234
        case 0x6A:
2235
            /* EXTLH */
2236
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
2237
            break;
2238
        case 0x72:
2239
            /* MSKQH */
2240
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
2241
            break;
2242
        case 0x77:
2243
            /* INSQH */
2244
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
2245
            break;
2246
        case 0x7A:
2247
            /* EXTQH */
2248
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
2249
            break;
2250
        default:
2251
            goto invalid_opc;
2252
        }
2253
        break;
2254
    case 0x13:
2255
        switch (fn7) {
2256
        case 0x00:
2257
            /* MULL */
2258
            if (likely(rc != 31)) {
2259
                if (ra == 31)
2260
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
2261
                else {
2262
                    if (islit)
2263
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2264
                    else
2265
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2266
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2267
                }
2268
            }
2269
            break;
2270
        case 0x20:
2271
            /* MULQ */
2272
            if (likely(rc != 31)) {
2273
                if (ra == 31)
2274
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
2275
                else if (islit)
2276
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2277
                else
2278
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2279
            }
2280
            break;
2281
        case 0x30:
2282
            /* UMULH */
2283
            gen_umulh(ra, rb, rc, islit, lit);
2284
            break;
2285
        case 0x40:
2286
            /* MULL/V */
2287
            gen_mullv(ra, rb, rc, islit, lit);
2288
            break;
2289
        case 0x60:
2290
            /* MULQ/V */
2291
            gen_mulqv(ra, rb, rc, islit, lit);
2292
            break;
2293
        default:
2294
            goto invalid_opc;
2295
        }
2296
        break;
2297
    case 0x14:
2298
        switch (fpfn) { /* fn11 & 0x3F */
2299
        case 0x04:
2300
            /* ITOFS */
2301
            if (!(ctx->amask & AMASK_FIX))
2302
                goto invalid_opc;
2303
            if (likely(rc != 31)) {
2304
                if (ra != 31) {
2305
                    TCGv_i32 tmp = tcg_temp_new_i32();
2306
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2307
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
2308
                    tcg_temp_free_i32(tmp);
2309
                } else
2310
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
2311
            }
2312
            break;
2313
        case 0x0A:
2314
            /* SQRTF */
2315
            if (!(ctx->amask & AMASK_FIX))
2316
                goto invalid_opc;
2317
            gen_fsqrtf(rb, rc);
2318
            break;
2319
        case 0x0B:
2320
            /* SQRTS */
2321
            if (!(ctx->amask & AMASK_FIX))
2322
                goto invalid_opc;
2323
            gen_fsqrts(ctx, rb, rc, fn11);
2324
            break;
2325
        case 0x14:
2326
            /* ITOFF */
2327
            if (!(ctx->amask & AMASK_FIX))
2328
                goto invalid_opc;
2329
            if (likely(rc != 31)) {
2330
                if (ra != 31) {
2331
                    TCGv_i32 tmp = tcg_temp_new_i32();
2332
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2333
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
2334
                    tcg_temp_free_i32(tmp);
2335
                } else
2336
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
2337
            }
2338
            break;
2339
        case 0x24:
2340
            /* ITOFT */
2341
            if (!(ctx->amask & AMASK_FIX))
2342
                goto invalid_opc;
2343
            if (likely(rc != 31)) {
2344
                if (ra != 31)
2345
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2346
                else
2347
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
2348
            }
2349
            break;
2350
        case 0x2A:
2351
            /* SQRTG */
2352
            if (!(ctx->amask & AMASK_FIX))
2353
                goto invalid_opc;
2354
            gen_fsqrtg(rb, rc);
2355
            break;
2356
        case 0x02B:
2357
            /* SQRTT */
2358
            if (!(ctx->amask & AMASK_FIX))
2359
                goto invalid_opc;
2360
            gen_fsqrtt(ctx, rb, rc, fn11);
2361
            break;
2362
        default:
2363
            goto invalid_opc;
2364
        }
2365
        break;
2366
    case 0x15:
2367
        /* VAX floating point */
2368
        /* XXX: rounding mode and trap are ignored (!) */
2369
        switch (fpfn) { /* fn11 & 0x3F */
2370
        case 0x00:
2371
            /* ADDF */
2372
            gen_faddf(ra, rb, rc);
2373
            break;
2374
        case 0x01:
2375
            /* SUBF */
2376
            gen_fsubf(ra, rb, rc);
2377
            break;
2378
        case 0x02:
2379
            /* MULF */
2380
            gen_fmulf(ra, rb, rc);
2381
            break;
2382
        case 0x03:
2383
            /* DIVF */
2384
            gen_fdivf(ra, rb, rc);
2385
            break;
2386
        case 0x1E:
2387
            /* CVTDG */
2388
#if 0 // TODO
2389
            gen_fcvtdg(rb, rc);
2390
#else
2391
            goto invalid_opc;
2392
#endif
2393
            break;
2394
        case 0x20:
2395
            /* ADDG */
2396
            gen_faddg(ra, rb, rc);
2397
            break;
2398
        case 0x21:
2399
            /* SUBG */
2400
            gen_fsubg(ra, rb, rc);
2401
            break;
2402
        case 0x22:
2403
            /* MULG */
2404
            gen_fmulg(ra, rb, rc);
2405
            break;
2406
        case 0x23:
2407
            /* DIVG */
2408
            gen_fdivg(ra, rb, rc);
2409
            break;
2410
        case 0x25:
2411
            /* CMPGEQ */
2412
            gen_fcmpgeq(ra, rb, rc);
2413
            break;
2414
        case 0x26:
2415
            /* CMPGLT */
2416
            gen_fcmpglt(ra, rb, rc);
2417
            break;
2418
        case 0x27:
2419
            /* CMPGLE */
2420
            gen_fcmpgle(ra, rb, rc);
2421
            break;
2422
        case 0x2C:
2423
            /* CVTGF */
2424
            gen_fcvtgf(rb, rc);
2425
            break;
2426
        case 0x2D:
2427
            /* CVTGD */
2428
#if 0 // TODO
2429
            gen_fcvtgd(rb, rc);
2430
#else
2431
            goto invalid_opc;
2432
#endif
2433
            break;
2434
        case 0x2F:
2435
            /* CVTGQ */
2436
            gen_fcvtgq(rb, rc);
2437
            break;
2438
        case 0x3C:
2439
            /* CVTQF */
2440
            gen_fcvtqf(rb, rc);
2441
            break;
2442
        case 0x3E:
2443
            /* CVTQG */
2444
            gen_fcvtqg(rb, rc);
2445
            break;
2446
        default:
2447
            goto invalid_opc;
2448
        }
2449
        break;
2450
    case 0x16:
2451
        /* IEEE floating-point */
2452
        switch (fpfn) { /* fn11 & 0x3F */
2453
        case 0x00:
2454
            /* ADDS */
2455
            gen_fadds(ctx, ra, rb, rc, fn11);
2456
            break;
2457
        case 0x01:
2458
            /* SUBS */
2459
            gen_fsubs(ctx, ra, rb, rc, fn11);
2460
            break;
2461
        case 0x02:
2462
            /* MULS */
2463
            gen_fmuls(ctx, ra, rb, rc, fn11);
2464
            break;
2465
        case 0x03:
2466
            /* DIVS */
2467
            gen_fdivs(ctx, ra, rb, rc, fn11);
2468
            break;
2469
        case 0x20:
2470
            /* ADDT */
2471
            gen_faddt(ctx, ra, rb, rc, fn11);
2472
            break;
2473
        case 0x21:
2474
            /* SUBT */
2475
            gen_fsubt(ctx, ra, rb, rc, fn11);
2476
            break;
2477
        case 0x22:
2478
            /* MULT */
2479
            gen_fmult(ctx, ra, rb, rc, fn11);
2480
            break;
2481
        case 0x23:
2482
            /* DIVT */
2483
            gen_fdivt(ctx, ra, rb, rc, fn11);
2484
            break;
2485
        case 0x24:
2486
            /* CMPTUN */
2487
            gen_fcmptun(ctx, ra, rb, rc, fn11);
2488
            break;
2489
        case 0x25:
2490
            /* CMPTEQ */
2491
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
2492
            break;
2493
        case 0x26:
2494
            /* CMPTLT */
2495
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
2496
            break;
2497
        case 0x27:
2498
            /* CMPTLE */
2499
            gen_fcmptle(ctx, ra, rb, rc, fn11);
2500
            break;
2501
        case 0x2C:
2502
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
2503
                /* CVTST */
2504
                gen_fcvtst(ctx, rb, rc, fn11);
2505
            } else {
2506
                /* CVTTS */
2507
                gen_fcvtts(ctx, rb, rc, fn11);
2508
            }
2509
            break;
2510
        case 0x2F:
2511
            /* CVTTQ */
2512
            gen_fcvttq(ctx, rb, rc, fn11);
2513
            break;
2514
        case 0x3C:
2515
            /* CVTQS */
2516
            gen_fcvtqs(ctx, rb, rc, fn11);
2517
            break;
2518
        case 0x3E:
2519
            /* CVTQT */
2520
            gen_fcvtqt(ctx, rb, rc, fn11);
2521
            break;
2522
        default:
2523
            goto invalid_opc;
2524
        }
2525
        break;
2526
    case 0x17:
2527
        switch (fn11) {
2528
        case 0x010:
2529
            /* CVTLQ */
2530
            gen_fcvtlq(rb, rc);
2531
            break;
2532
        case 0x020:
2533
            if (likely(rc != 31)) {
2534
                if (ra == rb) {
2535
                    /* FMOV */
2536
                    if (ra == 31)
2537
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
2538
                    else
2539
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2540
                } else {
2541
                    /* CPYS */
2542
                    gen_fcpys(ra, rb, rc);
2543
                }
2544
            }
2545
            break;
2546
        case 0x021:
2547
            /* CPYSN */
2548
            gen_fcpysn(ra, rb, rc);
2549
            break;
2550
        case 0x022:
2551
            /* CPYSE */
2552
            gen_fcpyse(ra, rb, rc);
2553
            break;
2554
        case 0x024:
2555
            /* MT_FPCR */
2556
            if (likely(ra != 31))
2557
                gen_helper_store_fpcr(cpu_fir[ra]);
2558
            else {
2559
                TCGv tmp = tcg_const_i64(0);
2560
                gen_helper_store_fpcr(tmp);
2561
                tcg_temp_free(tmp);
2562
            }
2563
            break;
2564
        case 0x025:
2565
            /* MF_FPCR */
2566
            if (likely(ra != 31))
2567
                gen_helper_load_fpcr(cpu_fir[ra]);
2568
            break;
2569
        case 0x02A:
2570
            /* FCMOVEQ */
2571
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
2572
            break;
2573
        case 0x02B:
2574
            /* FCMOVNE */
2575
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
2576
            break;
2577
        case 0x02C:
2578
            /* FCMOVLT */
2579
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
2580
            break;
2581
        case 0x02D:
2582
            /* FCMOVGE */
2583
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
2584
            break;
2585
        case 0x02E:
2586
            /* FCMOVLE */
2587
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
2588
            break;
2589
        case 0x02F:
2590
            /* FCMOVGT */
2591
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
2592
            break;
2593
        case 0x030:
2594
            /* CVTQL */
2595
            gen_fcvtql(rb, rc);
2596
            break;
2597
        case 0x130:
2598
            /* CVTQL/V */
2599
        case 0x530:
2600
            /* CVTQL/SV */
2601
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
2602
               /v doesn't do.  The only thing I can think is that /sv is a
2603
               valid instruction merely for completeness in the ISA.  */
2604
            gen_fcvtql_v(ctx, rb, rc);
2605
            break;
2606
        default:
2607
            goto invalid_opc;
2608
        }
2609
        break;
2610
    case 0x18:
2611
        switch ((uint16_t)disp16) {
2612
        case 0x0000:
2613
            /* TRAPB */
2614
            /* No-op.  */
2615
            break;
2616
        case 0x0400:
2617
            /* EXCB */
2618
            /* No-op.  */
2619
            break;
2620
        case 0x4000:
2621
            /* MB */
2622
            /* No-op */
2623
            break;
2624
        case 0x4400:
2625
            /* WMB */
2626
            /* No-op */
2627
            break;
2628
        case 0x8000:
2629
            /* FETCH */
2630
            /* No-op */
2631
            break;
2632
        case 0xA000:
2633
            /* FETCH_M */
2634
            /* No-op */
2635
            break;
2636
        case 0xC000:
2637
            /* RPCC */
2638
            if (ra != 31)
2639
                gen_helper_load_pcc(cpu_ir[ra]);
2640
            break;
2641
        case 0xE000:
2642
            /* RC */
2643
            gen_rx(ra, 0);
2644
            break;
2645
        case 0xE800:
2646
            /* ECB */
2647
            break;
2648
        case 0xF000:
2649
            /* RS */
2650
            gen_rx(ra, 1);
2651
            break;
2652
        case 0xF800:
2653
            /* WH64 */
2654
            /* No-op */
2655
            break;
2656
        default:
2657
            goto invalid_opc;
2658
        }
2659
        break;
2660
    case 0x19:
2661
        /* HW_MFPR (PALcode) */
2662
#ifndef CONFIG_USER_ONLY
2663
        if (ctx->pal_mode) {
2664
            gen_mfpr(ra, insn & 0xffff);
2665
            break;
2666
        }
2667
#endif
2668
        goto invalid_opc;
2669
    case 0x1A:
2670
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
2671
           prediction stack action, which of course we don't implement.  */
2672
        if (rb != 31) {
2673
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2674
        } else {
2675
            tcg_gen_movi_i64(cpu_pc, 0);
2676
        }
2677
        if (ra != 31) {
2678
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2679
        }
2680
        ret = EXIT_PC_UPDATED;
2681
        break;
2682
    case 0x1B:
2683
        /* HW_LD (PALcode) */
2684
#if defined (CONFIG_USER_ONLY)
2685
        goto invalid_opc;
2686
#else
2687
        if (!ctx->pal_mode)
2688
            goto invalid_opc;
2689
        if (ra != 31) {
2690
            TCGv addr = tcg_temp_new();
2691
            if (rb != 31)
2692
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2693
            else
2694
                tcg_gen_movi_i64(addr, disp12);
2695
            switch ((insn >> 12) & 0xF) {
2696
            case 0x0:
2697
                /* Longword physical access (hw_ldl/p) */
2698
                gen_helper_ldl_phys(cpu_ir[ra], addr);
2699
                break;
2700
            case 0x1:
2701
                /* Quadword physical access (hw_ldq/p) */
2702
                gen_helper_ldq_phys(cpu_ir[ra], addr);
2703
                break;
2704
            case 0x2:
2705
                /* Longword physical access with lock (hw_ldl_l/p) */
2706
                gen_helper_ldl_l_phys(cpu_ir[ra], addr);
2707
                break;
2708
            case 0x3:
2709
                /* Quadword physical access with lock (hw_ldq_l/p) */
2710
                gen_helper_ldq_l_phys(cpu_ir[ra], addr);
2711
                break;
2712
            case 0x4:
2713
                /* Longword virtual PTE fetch (hw_ldl/v) */
2714
                goto invalid_opc;
2715
            case 0x5:
2716
                /* Quadword virtual PTE fetch (hw_ldq/v) */
2717
                goto invalid_opc;
2718
                break;
2719
            case 0x6:
2720
                /* Incpu_ir[ra]id */
2721
                goto invalid_opc;
2722
            case 0x7:
2723
                /* Incpu_ir[ra]id */
2724
                goto invalid_opc;
2725
            case 0x8:
2726
                /* Longword virtual access (hw_ldl) */
2727
                goto invalid_opc;
2728
            case 0x9:
2729
                /* Quadword virtual access (hw_ldq) */
2730
                goto invalid_opc;
2731
            case 0xA:
2732
                /* Longword virtual access with protection check (hw_ldl/w) */
2733
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2734
                break;
2735
            case 0xB:
2736
                /* Quadword virtual access with protection check (hw_ldq/w) */
2737
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2738
                break;
2739
            case 0xC:
2740
                /* Longword virtual access with alt access mode (hw_ldl/a)*/
2741
                goto invalid_opc;
2742
            case 0xD:
2743
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
2744
                goto invalid_opc;
2745
            case 0xE:
2746
                /* Longword virtual access with alternate access mode and
2747
                   protection checks (hw_ldl/wa) */
2748
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
2749
                break;
2750
            case 0xF:
2751
                /* Quadword virtual access with alternate access mode and
2752
                   protection checks (hw_ldq/wa) */
2753
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
2754
                break;
2755
            }
2756
            tcg_temp_free(addr);
2757
        }
2758
        break;
2759
#endif
2760
    case 0x1C:
2761
        switch (fn7) {
2762
        case 0x00:
2763
            /* SEXTB */
2764
            if (!(ctx->amask & AMASK_BWX))
2765
                goto invalid_opc;
2766
            if (likely(rc != 31)) {
2767
                if (islit)
2768
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
2769
                else
2770
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
2771
            }
2772
            break;
2773
        case 0x01:
2774
            /* SEXTW */
2775
            if (!(ctx->amask & AMASK_BWX))
2776
                goto invalid_opc;
2777
            if (likely(rc != 31)) {
2778
                if (islit)
2779
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2780
                else
2781
                    tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2782
            }
2783
            break;
2784
        case 0x30:
2785
            /* CTPOP */
2786
            if (!(ctx->amask & AMASK_CIX))
2787
                goto invalid_opc;
2788
            if (likely(rc != 31)) {
2789
                if (islit)
2790
                    tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2791
                else
2792
                    gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2793
            }
2794
            break;
2795
        case 0x31:
2796
            /* PERR */
2797
            if (!(ctx->amask & AMASK_MVI))
2798
                goto invalid_opc;
2799
            gen_perr(ra, rb, rc, islit, lit);
2800
            break;
2801
        case 0x32:
2802
            /* CTLZ */
2803
            if (!(ctx->amask & AMASK_CIX))
2804
                goto invalid_opc;
2805
            if (likely(rc != 31)) {
2806
                if (islit)
2807
                    tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2808
                else
2809
                    gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2810
            }
2811
            break;
2812
        case 0x33:
2813
            /* CTTZ */
2814
            if (!(ctx->amask & AMASK_CIX))
2815
                goto invalid_opc;
2816
            if (likely(rc != 31)) {
2817
                if (islit)
2818
                    tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2819
                else
2820
                    gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2821
            }
2822
            break;
2823
        case 0x34:
2824
            /* UNPKBW */
2825
            if (!(ctx->amask & AMASK_MVI))
2826
                goto invalid_opc;
2827
            if (real_islit || ra != 31)
2828
                goto invalid_opc;
2829
            gen_unpkbw (rb, rc);
2830
            break;
2831
        case 0x35:
2832
            /* UNPKBL */
2833
            if (!(ctx->amask & AMASK_MVI))
2834
                goto invalid_opc;
2835
            if (real_islit || ra != 31)
2836
                goto invalid_opc;
2837
            gen_unpkbl (rb, rc);
2838
            break;
2839
        case 0x36:
2840
            /* PKWB */
2841
            if (!(ctx->amask & AMASK_MVI))
2842
                goto invalid_opc;
2843
            if (real_islit || ra != 31)
2844
                goto invalid_opc;
2845
            gen_pkwb (rb, rc);
2846
            break;
2847
        case 0x37:
2848
            /* PKLB */
2849
            if (!(ctx->amask & AMASK_MVI))
2850
                goto invalid_opc;
2851
            if (real_islit || ra != 31)
2852
                goto invalid_opc;
2853
            gen_pklb (rb, rc);
2854
            break;
2855
        case 0x38:
2856
            /* MINSB8 */
2857
            if (!(ctx->amask & AMASK_MVI))
2858
                goto invalid_opc;
2859
            gen_minsb8 (ra, rb, rc, islit, lit);
2860
            break;
2861
        case 0x39:
2862
            /* MINSW4 */
2863
            if (!(ctx->amask & AMASK_MVI))
2864
                goto invalid_opc;
2865
            gen_minsw4 (ra, rb, rc, islit, lit);
2866
            break;
2867
        case 0x3A:
2868
            /* MINUB8 */
2869
            if (!(ctx->amask & AMASK_MVI))
2870
                goto invalid_opc;
2871
            gen_minub8 (ra, rb, rc, islit, lit);
2872
            break;
2873
        case 0x3B:
2874
            /* MINUW4 */
2875
            if (!(ctx->amask & AMASK_MVI))
2876
                goto invalid_opc;
2877
            gen_minuw4 (ra, rb, rc, islit, lit);
2878
            break;
2879
        case 0x3C:
2880
            /* MAXUB8 */
2881
            if (!(ctx->amask & AMASK_MVI))
2882
                goto invalid_opc;
2883
            gen_maxub8 (ra, rb, rc, islit, lit);
2884
            break;
2885
        case 0x3D:
2886
            /* MAXUW4 */
2887
            if (!(ctx->amask & AMASK_MVI))
2888
                goto invalid_opc;
2889
            gen_maxuw4 (ra, rb, rc, islit, lit);
2890
            break;
2891
        case 0x3E:
2892
            /* MAXSB8 */
2893
            if (!(ctx->amask & AMASK_MVI))
2894
                goto invalid_opc;
2895
            gen_maxsb8 (ra, rb, rc, islit, lit);
2896
            break;
2897
        case 0x3F:
2898
            /* MAXSW4 */
2899
            if (!(ctx->amask & AMASK_MVI))
2900
                goto invalid_opc;
2901
            gen_maxsw4 (ra, rb, rc, islit, lit);
2902
            break;
2903
        case 0x70:
2904
            /* FTOIT */
2905
            if (!(ctx->amask & AMASK_FIX))
2906
                goto invalid_opc;
2907
            if (likely(rc != 31)) {
2908
                if (ra != 31)
2909
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2910
                else
2911
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
2912
            }
2913
            break;
2914
        case 0x78:
2915
            /* FTOIS */
2916
            if (!(ctx->amask & AMASK_FIX))
2917
                goto invalid_opc;
2918
            if (rc != 31) {
2919
                TCGv_i32 tmp1 = tcg_temp_new_i32();
2920
                if (ra != 31)
2921
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
2922
                else {
2923
                    TCGv tmp2 = tcg_const_i64(0);
2924
                    gen_helper_s_to_memory(tmp1, tmp2);
2925
                    tcg_temp_free(tmp2);
2926
                }
2927
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
2928
                tcg_temp_free_i32(tmp1);
2929
            }
2930
            break;
2931
        default:
2932
            goto invalid_opc;
2933
        }
2934
        break;
2935
    case 0x1D:
2936
        /* HW_MTPR (PALcode) */
2937
#ifndef CONFIG_USER_ONLY
2938
        if (ctx->pal_mode) {
2939
            gen_mtpr(ra, insn & 0xffff);
2940
            break;
2941
        }
2942
#endif
2943
        goto invalid_opc;
2944
    case 0x1E:
2945
        /* HW_RET (PALcode) */
2946
#if defined (CONFIG_USER_ONLY)
2947
        goto invalid_opc;
2948
#else
2949
        if (!ctx->pal_mode)
2950
            goto invalid_opc;
2951
        if (rb == 31) {
2952
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2953
               address from EXC_ADDR.  This turns out to be useful for our
2954
               emulation PALcode, so continue to accept it.  */
2955
            TCGv tmp = tcg_temp_new();
2956
            /* FIXME: Get exc_addr.  */
2957
            gen_helper_hw_ret(tmp);
2958
            tcg_temp_free(tmp);
2959
        } else {
2960
            gen_helper_hw_ret(cpu_ir[rb]);
2961
        }
2962
        ret = EXIT_PC_UPDATED;
2963
        break;
2964
#endif
2965
    case 0x1F:
2966
        /* HW_ST (PALcode) */
2967
#if defined (CONFIG_USER_ONLY)
2968
        goto invalid_opc;
2969
#else
2970
        if (!ctx->pal_mode)
2971
            goto invalid_opc;
2972
        else {
2973
            TCGv addr, val;
2974
            addr = tcg_temp_new();
2975
            if (rb != 31)
2976
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2977
            else
2978
                tcg_gen_movi_i64(addr, disp12);
2979
            if (ra != 31)
2980
                val = cpu_ir[ra];
2981
            else {
2982
                val = tcg_temp_new();
2983
                tcg_gen_movi_i64(val, 0);
2984
            }
2985
            switch ((insn >> 12) & 0xF) {
2986
            case 0x0:
2987
                /* Longword physical access */
2988
                gen_helper_stl_phys(addr, val);
2989
                break;
2990
            case 0x1:
2991
                /* Quadword physical access */
2992
                gen_helper_stq_phys(addr, val);
2993
                break;
2994
            case 0x2:
2995
                /* Longword physical access with lock */
2996
                gen_helper_stl_c_phys(val, addr, val);
2997
                break;
2998
            case 0x3:
2999
                /* Quadword physical access with lock */
3000
                gen_helper_stq_c_phys(val, addr, val);
3001
                break;
3002
            case 0x4:
3003
                /* Longword virtual access */
3004
                goto invalid_opc;
3005
            case 0x5:
3006
                /* Quadword virtual access */
3007
                goto invalid_opc;
3008
            case 0x6:
3009
                /* Invalid */
3010
                goto invalid_opc;
3011
            case 0x7:
3012
                /* Invalid */
3013
                goto invalid_opc;
3014
            case 0x8:
3015
                /* Invalid */
3016
                goto invalid_opc;
3017
            case 0x9:
3018
                /* Invalid */
3019
                goto invalid_opc;
3020
            case 0xA:
3021
                /* Invalid */
3022
                goto invalid_opc;
3023
            case 0xB:
3024
                /* Invalid */
3025
                goto invalid_opc;
3026
            case 0xC:
3027
                /* Longword virtual access with alternate access mode */
3028
                goto invalid_opc;
3029
            case 0xD:
3030
                /* Quadword virtual access with alternate access mode */
3031
                goto invalid_opc;
3032
            case 0xE:
3033
                /* Invalid */
3034
                goto invalid_opc;
3035
            case 0xF:
3036
                /* Invalid */
3037
                goto invalid_opc;
3038
            }
3039
            if (ra == 31)
3040
                tcg_temp_free(val);
3041
            tcg_temp_free(addr);
3042
        }
3043
        break;
3044
#endif
3045
    case 0x20:
3046
        /* LDF */
3047
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
3048
        break;
3049
    case 0x21:
3050
        /* LDG */
3051
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
3052
        break;
3053
    case 0x22:
3054
        /* LDS */
3055
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
3056
        break;
3057
    case 0x23:
3058
        /* LDT */
3059
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
3060
        break;
3061
    case 0x24:
3062
        /* STF */
3063
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
3064
        break;
3065
    case 0x25:
3066
        /* STG */
3067
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
3068
        break;
3069
    case 0x26:
3070
        /* STS */
3071
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
3072
        break;
3073
    case 0x27:
3074
        /* STT */
3075
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
3076
        break;
3077
    case 0x28:
3078
        /* LDL */
3079
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
3080
        break;
3081
    case 0x29:
3082
        /* LDQ */
3083
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
3084
        break;
3085
    case 0x2A:
3086
        /* LDL_L */
3087
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
3088
        break;
3089
    case 0x2B:
3090
        /* LDQ_L */
3091
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
3092
        break;
3093
    case 0x2C:
3094
        /* STL */
3095
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
3096
        break;
3097
    case 0x2D:
3098
        /* STQ */
3099
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
3100
        break;
3101
    case 0x2E:
3102
        /* STL_C */
3103
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
3104
        break;
3105
    case 0x2F:
3106
        /* STQ_C */
3107
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
3108
        break;
3109
    case 0x30:
3110
        /* BR */
3111
        ret = gen_bdirect(ctx, ra, disp21);
3112
        break;
3113
    case 0x31: /* FBEQ */
3114
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
3115
        break;
3116
    case 0x32: /* FBLT */
3117
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
3118
        break;
3119
    case 0x33: /* FBLE */
3120
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
3121
        break;
3122
    case 0x34:
3123
        /* BSR */
3124
        ret = gen_bdirect(ctx, ra, disp21);
3125
        break;
3126
    case 0x35: /* FBNE */
3127
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
3128
        break;
3129
    case 0x36: /* FBGE */
3130
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
3131
        break;
3132
    case 0x37: /* FBGT */
3133
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
3134
        break;
3135
    case 0x38:
3136
        /* BLBC */
3137
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
3138
        break;
3139
    case 0x39:
3140
        /* BEQ */
3141
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
3142
        break;
3143
    case 0x3A:
3144
        /* BLT */
3145
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
3146
        break;
3147
    case 0x3B:
3148
        /* BLE */
3149
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
3150
        break;
3151
    case 0x3C:
3152
        /* BLBS */
3153
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
3154
        break;
3155
    case 0x3D:
3156
        /* BNE */
3157
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
3158
        break;
3159
    case 0x3E:
3160
        /* BGE */
3161
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
3162
        break;
3163
    case 0x3F:
3164
        /* BGT */
3165
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
3166
        break;
3167
    invalid_opc:
3168
        ret = gen_invalid(ctx);
3169
        break;
3170
    }
3171

    
3172
    return ret;
3173
}
3174

    
3175
static inline void gen_intermediate_code_internal(CPUState *env,
3176
                                                  TranslationBlock *tb,
3177
                                                  int search_pc)
3178
{
3179
    DisasContext ctx, *ctxp = &ctx;
3180
    target_ulong pc_start;
3181
    uint32_t insn;
3182
    uint16_t *gen_opc_end;
3183
    CPUBreakpoint *bp;
3184
    int j, lj = -1;
3185
    ExitStatus ret;
3186
    int num_insns;
3187
    int max_insns;
3188

    
3189
    pc_start = tb->pc;
3190
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3191

    
3192
    ctx.tb = tb;
3193
    ctx.env = env;
3194
    ctx.pc = pc_start;
3195
    ctx.amask = env->amask;
3196
    ctx.mem_idx = cpu_mmu_index(env);
3197
#if !defined (CONFIG_USER_ONLY)
3198
    ctx.pal_mode = env->pal_mode;
3199
#endif
3200

    
3201
    /* ??? Every TB begins with unset rounding mode, to be initialized on
3202
       the first fp insn of the TB.  Alternately we could define a proper
3203
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3204
       to reset the FP_STATUS to that default at the end of any TB that
3205
       changes the default.  We could even (gasp) dynamiclly figure out
3206
       what default would be most efficient given the running program.  */
3207
    ctx.tb_rm = -1;
3208
    /* Similarly for flush-to-zero.  */
3209
    ctx.tb_ftz = -1;
3210

    
3211
    num_insns = 0;
3212
    max_insns = tb->cflags & CF_COUNT_MASK;
3213
    if (max_insns == 0)
3214
        max_insns = CF_COUNT_MASK;
3215

    
3216
    gen_icount_start();
3217
    do {
3218
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3219
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3220
                if (bp->pc == ctx.pc) {
3221
                    gen_excp(&ctx, EXCP_DEBUG, 0);
3222
                    break;
3223
                }
3224
            }
3225
        }
3226
        if (search_pc) {
3227
            j = gen_opc_ptr - gen_opc_buf;
3228
            if (lj < j) {
3229
                lj++;
3230
                while (lj < j)
3231
                    gen_opc_instr_start[lj++] = 0;
3232
            }
3233
            gen_opc_pc[lj] = ctx.pc;
3234
            gen_opc_instr_start[lj] = 1;
3235
            gen_opc_icount[lj] = num_insns;
3236
        }
3237
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3238
            gen_io_start();
3239
        insn = ldl_code(ctx.pc);
3240
        num_insns++;
3241

    
3242
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3243
            tcg_gen_debug_insn_start(ctx.pc);
3244
        }
3245

    
3246
        ctx.pc += 4;
3247
        ret = translate_one(ctxp, insn);
3248

    
3249
        /* If we reach a page boundary, are single stepping,
3250
           or exhaust instruction count, stop generation.  */
3251
        if (ret == NO_EXIT
3252
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
3253
                || gen_opc_ptr >= gen_opc_end
3254
                || num_insns >= max_insns
3255
                || singlestep
3256
                || env->singlestep_enabled)) {
3257
            ret = EXIT_PC_STALE;
3258
        }
3259
    } while (ret == NO_EXIT);
3260

    
3261
    if (tb->cflags & CF_LAST_IO) {
3262
        gen_io_end();
3263
    }
3264

    
3265
    switch (ret) {
3266
    case EXIT_GOTO_TB:
3267
    case EXIT_NORETURN:
3268
        break;
3269
    case EXIT_PC_STALE:
3270
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
3271
        /* FALLTHRU */
3272
    case EXIT_PC_UPDATED:
3273
        if (env->singlestep_enabled) {
3274
            gen_excp_1(EXCP_DEBUG, 0);
3275
        } else {
3276
            tcg_gen_exit_tb(0);
3277
        }
3278
        break;
3279
    default:
3280
        abort();
3281
    }
3282

    
3283
    gen_icount_end(tb, num_insns);
3284
    *gen_opc_ptr = INDEX_op_end;
3285
    if (search_pc) {
3286
        j = gen_opc_ptr - gen_opc_buf;
3287
        lj++;
3288
        while (lj <= j)
3289
            gen_opc_instr_start[lj++] = 0;
3290
    } else {
3291
        tb->size = ctx.pc - pc_start;
3292
        tb->icount = num_insns;
3293
    }
3294

    
3295
#ifdef DEBUG_DISAS
3296
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3297
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
3298
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
3299
        qemu_log("\n");
3300
    }
3301
#endif
3302
}
3303

    
3304
/* Public entry point: translate the TB without PC-search bookkeeping.  */
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
3308

    
3309
/* Public entry point: translate the TB while recording gen_opc_* data
   so a host PC can be mapped back to a guest PC (search_pc = 1).  */
void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
3313

    
3314
struct cpu_def_t {
3315
    const char *name;
3316
    int implver, amask;
3317
};
3318

    
3319
static const struct cpu_def_t cpu_defs[] = {
3320
    { "ev4",   IMPLVER_2106x, 0 },
3321
    { "ev5",   IMPLVER_21164, 0 },
3322
    { "ev56",  IMPLVER_21164, AMASK_BWX },
3323
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3324
    { "ev6",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3325
    { "ev67",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3326
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3327
    { "ev68",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3328
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3329
    { "21064", IMPLVER_2106x, 0 },
3330
    { "21164", IMPLVER_21164, 0 },
3331
    { "21164a", IMPLVER_21164, AMASK_BWX },
3332
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3333
    { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3334
    { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3335
                                | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3336
};
3337

    
3338
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
3339
{
3340
    CPUAlphaState *env;
3341
    int implver, amask, i, max;
3342

    
3343
    env = qemu_mallocz(sizeof(CPUAlphaState));
3344
    cpu_exec_init(env);
3345
    alpha_translate_init();
3346
    tlb_flush(env, 1);
3347

    
3348
    /* Default to ev67; no reason not to emulate insns by default.  */
3349
    implver = IMPLVER_21264;
3350
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3351
             | AMASK_TRAP | AMASK_PREFETCH);
3352

    
3353
    max = ARRAY_SIZE(cpu_defs);
3354
    for (i = 0; i < max; i++) {
3355
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3356
            implver = cpu_defs[i].implver;
3357
            amask = cpu_defs[i].amask;
3358
            break;
3359
        }
3360
    }
3361
    env->implver = implver;
3362
    env->amask = amask;
3363

    
3364
#if defined (CONFIG_USER_ONLY)
3365
    env->ps = PS_USER_MODE;
3366
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3367
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
3368
#endif
3369
    env->lock_addr = -1;
3370
    env->fen = 1;
3371

    
3372
    qemu_init_vcpu(env);
3373
    return env;
3374
}
3375

    
3376
/* Restore CPU state after an exception taken mid-TB.  On Alpha only the
   guest PC needs recovering, from the table built during translation.  */
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}