target-alpha/translate.c @ 3b4fefd6

/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
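
/* Sized exactly for the NUL-terminated names written by
   alpha_translate_init below: "ir0".."ir9" (10 * 4 bytes) and
   "ir10".."ir30" (21 * 5), plus "fir0".."fir9" (10 * 5) and
   "fir10".."fir30" (21 * 6).  */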

#include "gen-icount.h"

static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
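/* Raise EXCEPTION with the given ERROR_CODE.  The gen_excp wrapper
   below also flushes the current PC into cpu_pc first, so the handler
   sees the address of the faulting instruction.  */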
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

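/* Load-locked.  Besides the usual memory load, remember the address
   and the loaded value so that a later store-conditional can check
   whether the location is (apparently) unchanged.  */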
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

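/* Generate a memory load.  FP selects the floating-point register file,
   and CLEAR implements the LDQ_U-style zeroing of the low three address
   bits.  */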
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}

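/* Store-conditional.  In the system-mode path below, RA is set to 1 only
   if the address still matches the recorded lock address and memory still
   holds the recorded lock value; otherwise RA becomes 0 and nothing is
   stored.  */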
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}

static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movi pc, next
             addi tmp, pc, disp
             movcond pc, cond, 0, tmp, pc
           or
             setcond tmp, cond, 0
             movi pc, next
             neg tmp, tmp
             andi tmp, tmp, disp
             add pc, pc, tmp
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

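/* IEEE requires -0.0 to compare equal to +0.0, but the integer brcond
   used on the raw bit pattern would see 1ull << 63.  Canonicalize SRC
   just enough that an integer comparison against zero gives the IEEE
   answer for the given COND.  */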
static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

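/* CMOVxx.  Conditionally copy RB (or a literal) into RC, implemented by
   branching around the move on the inverted condition.  */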
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}

static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_tmp;
    int l1;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
    } else {
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

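/* Switch the softfloat rounding mode to match the instruction's RM
   qualifier, skipping the store when the mode established earlier in
   this TB is already correct.  */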
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

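/* Fetch an FP input operand through one of the ieee_input helpers,
   which gives them a chance to raise the appropriate trap for invalid
   inputs; the /S (software completion) qualifier and the compare
   context select the variant.  $f31 reads as zero.  */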
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}

#define FARITH2(name)                                   \
static inline void glue(gen_f, name)(int rb, int rc)    \
{                                                       \
    if (unlikely(rc == 31)) {                           \
        return;                                         \
    }                                                   \
    if (rb != 31) {                                     \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
    } else {                                            \
        TCGv tmp = tcg_const_i64(0);                    \
        gen_helper_ ## name (cpu_fir[rc], tmp);         \
        tcg_temp_free(tmp);                             \
    }                                                   \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}

#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
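
/* Example: LIT == 0x0f keeps the low four bytes, so the mask is
   0x00000000ffffffffull and ZAPNOT degenerates into a 32-bit
   zero-extension (the 0x0f fast path in gen_zapnoti below).  */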

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64(cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)

static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}

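/* RC/RS: read the current interrupt flag into RA (unless RA is $31)
   and then replace the flag with SET.  */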
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                              offsetof(CPUState, cpu_index));
            break;

        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    return gen_invalid(ctx);
}

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

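/* Map an OSF/1 processor-register number to its offset within
   CPUAlphaState.  PR_BYTE or PR_LONG is OR'd into the offset to encode
   the access width; a return value of 0 marks the register as
   unimplemented (read-zero, write-ignore).  */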
static int cpu_pr_data(int pr)
1574
{
1575
    switch (pr) {
1576
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
1577
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
1578
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1579
    case  3: return offsetof(CPUAlphaState, trap_arg0);
1580
    case  4: return offsetof(CPUAlphaState, trap_arg1);
1581
    case  5: return offsetof(CPUAlphaState, trap_arg2);
1582
    case  6: return offsetof(CPUAlphaState, exc_addr);
1583
    case  7: return offsetof(CPUAlphaState, palbr);
1584
    case  8: return offsetof(CPUAlphaState, ptbr);
1585
    case  9: return offsetof(CPUAlphaState, vptptr);
1586
    case 10: return offsetof(CPUAlphaState, unique);
1587
    case 11: return offsetof(CPUAlphaState, sysval);
1588
    case 12: return offsetof(CPUAlphaState, usp);
    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);
    }
    return 0;
}

static void gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return;
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
}

static void gen_mtpr(int rb, int regno)
{
    TCGv tmp;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    /* These two register numbers perform a TLB cache flush.  Thankfully we
       can only do this inside PALmode, which means that the current basic
       block cannot be affected by the change in mappings.  */
    if (regno == 255) {
        /* TBIA */
        gen_helper_tbia();
    } else if (regno == 254) {
        /* TBIS */
        gen_helper_tbis(tmp);
    } else {
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        int data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(tmp, cpu_env, data);
            }
        }
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }
}
#endif /* !USER_ONLY */

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
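    /* Register $31 reads as zero, so a non-literal operand of rb == 31
       can be canonicalized to the literal 0; real_islit remembers
       whether the literal bit was actually set in the encoding.  */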
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
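    /* The shift pairs above sign-extend the 21- and 12-bit displacement
       fields: each field is left-justified in a signed 32-bit value and
       then arithmetic-shifted back down.  */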
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, palcode);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
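                /* amask carries the implemented-extension bits in the TB
                   flags; AMASK returns its operand with those bits
                   cleared, which is how software probes for the
                   BWX/FIX/CIX/MVI extensions.  */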

                if (islit) {
                    tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
                } else {
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
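            /* ITOFS moves the low 32 bits of an integer register into an
               FP register, reinterpreted as an S-float; the helper expands
               the 32-bit memory format to the 64-bit register format.  */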
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtf(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x0B:
            /* SQRTS */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrts(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        case 0x14:
            /* ITOFF */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtg(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x02B:
            /* SQRTT */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtt(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
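        /* Presumably acceptable in practice: VAX FP is legacy, and the
           helpers compute with a single rounding mode (an emulation
           shortcut rather than architecturally exact behavior).  */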
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x1E:
            /* CVTDG */
#if 0 // TODO
            gen_fcvtdg(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2D:
            /* CVTGD */
#if 0 // TODO
            gen_fcvtgd(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
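                /* Function 0x2C is overloaded: encodings 0x2AC and 0x6AC
                   are CVTST (and CVTST/S); every other qualifier
                   combination is a CVTTS variant.  */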
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
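            /* MB and WMB can be no-ops on the assumption that TCG executes
               guest memory operations in program order on a single thread,
               so there is nothing to order.  */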
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            if (ra != 31)
                gen_helper_load_pcc(cpu_ir[ra]);
            break;
        case 0xE000:
            /* RC */
            gen_rx(ra, 0);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            gen_rx(ra, 1);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            gen_mfpr(ra, insn & 0xffff);
            break;
        }
#endif
        goto invalid_opc;
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        if (rb != 31) {
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        } else {
            tcg_gen_movi_i64(cpu_pc, 0);
        }
        if (ra != 31) {
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr;

            if (ra == 31) {
                break;
            }

            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
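            /* Bits <15:12> of the instruction select the access type;
               only the physical and protection-check forms are handled
               here, the rest fault as invalid.  */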
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
2809
                goto invalid_opc;
2810
            case 0x8:
2811
                /* Longword virtual access (hw_ldl) */
2812
                goto invalid_opc;
2813
            case 0x9:
2814
                /* Quadword virtual access (hw_ldq) */
2815
                goto invalid_opc;
2816
            case 0xA:
2817
                /* Longword virtual access with protection check (hw_ldl/w) */
2818
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2819
                break;
2820
            case 0xB:
2821
                /* Quadword virtual access with protection check (hw_ldq/w) */
2822
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2823
                break;
2824
            case 0xC:
2825
                /* Longword virtual access with alt access mode (hw_ldl/a)*/
2826
                goto invalid_opc;
2827
            case 0xD:
2828
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
2829
                goto invalid_opc;
2830
            case 0xE:
2831
                /* Longword virtual access with alternate access mode and
2832
                   protection checks (hw_ldl/wa) */
2833
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
2834
                break;
2835
            case 0xF:
2836
                /* Quadword virtual access with alternate access mode and
2837
                   protection checks (hw_ldq/wa) */
2838
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
2839
                break;
2840
            }
2841
            tcg_temp_free(addr);
2842
            break;
2843
        }
2844
#endif
2845
        goto invalid_opc;
2846
    case 0x1C:
2847
        switch (fn7) {
2848
        case 0x00:
2849
            /* SEXTB */
2850
            if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
2851
                goto invalid_opc;
2852
            }
2853
            if (likely(rc != 31)) {
2854
                if (islit)
2855
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
2856
                else
2857
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
2858
            }
2859
            break;
2860
        case 0x01:
2861
            /* SEXTW */
2862
            if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
2863
                if (likely(rc != 31)) {
2864
                    if (islit) {
2865
                        tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2866
                    } else {
2867
                        tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2868
                    }
2869
                }
2870
                break;
2871
            }
2872
            goto invalid_opc;
2873
        case 0x30:
2874
            /* CTPOP */
2875
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2876
                if (likely(rc != 31)) {
2877
                    if (islit) {
2878
                        tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2879
                    } else {
2880
                        gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2881
                    }
2882
                }
2883
                break;
2884
            }
2885
            goto invalid_opc;
2886
        case 0x31:
2887
            /* PERR */
2888
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2889
                gen_perr(ra, rb, rc, islit, lit);
2890
                break;
2891
            }
2892
            goto invalid_opc;
2893
        case 0x32:
2894
            /* CTLZ */
2895
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2896
                if (likely(rc != 31)) {
2897
                    if (islit) {
2898
                        tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2899
                    } else {
2900
                        gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2901
                    }
2902
                }
2903
                break;
2904
            }
2905
            goto invalid_opc;
2906
        case 0x33:
2907
            /* CTTZ */
2908
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2909
                if (likely(rc != 31)) {
2910
                    if (islit) {
2911
                        tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2912
                    } else {
2913
                        gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2914
                    }
2915
                }
2916
                break;
2917
            }
2918
            goto invalid_opc;
2919
        case 0x34:
2920
            /* UNPKBW */
2921
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2922
                if (real_islit || ra != 31) {
2923
                    goto invalid_opc;
2924
                }
2925
                gen_unpkbw(rb, rc);
2926
                break;
2927
            }
2928
            goto invalid_opc;
2929
        case 0x35:
2930
            /* UNPKBL */
2931
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2932
                if (real_islit || ra != 31) {
2933
                    goto invalid_opc;
2934
                }
2935
                gen_unpkbl(rb, rc);
2936
                break;
2937
            }
2938
            goto invalid_opc;
2939
        case 0x36:
2940
            /* PKWB */
2941
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2942
                if (real_islit || ra != 31) {
2943
                    goto invalid_opc;
2944
                }
2945
                gen_pkwb(rb, rc);
2946
                break;
2947
            }
2948
            goto invalid_opc;
2949
        case 0x37:
2950
            /* PKLB */
2951
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2952
                if (real_islit || ra != 31) {
2953
                    goto invalid_opc;
2954
                }
2955
                gen_pklb(rb, rc);
2956
                break;
2957
            }
2958
            goto invalid_opc;
2959
        case 0x38:
2960
            /* MINSB8 */
2961
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2962
                gen_minsb8(ra, rb, rc, islit, lit);
2963
                break;
2964
            }
2965
            goto invalid_opc;
2966
        case 0x39:
2967
            /* MINSW4 */
2968
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2969
                gen_minsw4(ra, rb, rc, islit, lit);
2970
                break;
2971
            }
2972
            goto invalid_opc;
2973
        case 0x3A:
2974
            /* MINUB8 */
2975
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2976
                gen_minub8(ra, rb, rc, islit, lit);
2977
                break;
2978
            }
2979
            goto invalid_opc;
2980
        case 0x3B:
2981
            /* MINUW4 */
2982
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2983
                gen_minuw4(ra, rb, rc, islit, lit);
2984
                break;
2985
            }
2986
            goto invalid_opc;
2987
        case 0x3C:
2988
            /* MAXUB8 */
2989
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2990
                gen_maxub8(ra, rb, rc, islit, lit);
2991
                break;
2992
            }
2993
            goto invalid_opc;
2994
        case 0x3D:
2995
            /* MAXUW4 */
2996
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2997
                gen_maxuw4(ra, rb, rc, islit, lit);
2998
                break;
2999
            }
3000
            goto invalid_opc;
3001
        case 0x3E:
3002
            /* MAXSB8 */
3003
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3004
                gen_maxsb8(ra, rb, rc, islit, lit);
3005
                break;
3006
            }
3007
            goto invalid_opc;
3008
        case 0x3F:
3009
            /* MAXSW4 */
3010
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3011
                gen_maxsw4(ra, rb, rc, islit, lit);
3012
                break;
3013
            }
3014
            goto invalid_opc;
3015
        case 0x70:
3016
            /* FTOIT */
3017
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3018
                goto invalid_opc;
3019
            }
3020
            if (likely(rc != 31)) {
3021
                if (ra != 31)
3022
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3023
                else
3024
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
3025
            }
3026
            break;
3027
        case 0x78:
3028
            /* FTOIS */
3029
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3030
                goto invalid_opc;
3031
            }
3032
            if (rc != 31) {
3033
                TCGv_i32 tmp1 = tcg_temp_new_i32();
3034
                if (ra != 31)
3035
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
3036
                else {
3037
                    TCGv tmp2 = tcg_const_i64(0);
3038
                    gen_helper_s_to_memory(tmp1, tmp2);
3039
                    tcg_temp_free(tmp2);
3040
                }
3041
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
3042
                tcg_temp_free_i32(tmp1);
3043
            }
3044
            break;
3045
        default:
3046
            goto invalid_opc;
3047
        }
3048
        break;
3049
    case 0x1D:
3050
        /* HW_MTPR (PALcode) */
3051
#ifndef CONFIG_USER_ONLY
3052
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3053
            gen_mtpr(rb, insn & 0xffff);
3054
            break;
3055
        }
3056
#endif
3057
        goto invalid_opc;
3058
    case 0x1E:
3059
        /* HW_RET (PALcode) */
3060
#ifndef CONFIG_USER_ONLY
3061
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3062
            if (rb == 31) {
3063
                /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3064
                   address from EXC_ADDR.  This turns out to be useful for our
3065
                   emulation PALcode, so continue to accept it.  */
3066
                TCGv tmp = tcg_temp_new();
3067
                tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUState, exc_addr));
3068
                gen_helper_hw_ret(tmp);
3069
                tcg_temp_free(tmp);
3070
            } else {
3071
                gen_helper_hw_ret(cpu_ir[rb]);
3072
            }
3073
            ret = EXIT_PC_UPDATED;
3074
            break;
3075
        }
3076
#endif
3077
        goto invalid_opc;
3078
    case 0x1F:
3079
        /* HW_ST (PALcode) */
3080
#ifndef CONFIG_USER_ONLY
3081
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3082
            TCGv addr, val;
3083
            addr = tcg_temp_new();
3084
            if (rb != 31)
3085
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3086
            else
3087
                tcg_gen_movi_i64(addr, disp12);
3088
            if (ra != 31)
3089
                val = cpu_ir[ra];
3090
            else {
3091
                val = tcg_temp_new();
3092
                tcg_gen_movi_i64(val, 0);
3093
            }
3094
            switch ((insn >> 12) & 0xF) {
3095
            case 0x0:
3096
                /* Longword physical access */
3097
                gen_helper_stl_phys(addr, val);
3098
                break;
3099
            case 0x1:
3100
                /* Quadword physical access */
3101
                gen_helper_stq_phys(addr, val);
3102
                break;
3103
            case 0x2:
3104
                /* Longword physical access with lock */
3105
                gen_helper_stl_c_phys(val, addr, val);
3106
                break;
3107
            case 0x3:
3108
                /* Quadword physical access with lock */
3109
                gen_helper_stq_c_phys(val, addr, val);
3110
                break;
3111
            case 0x4:
3112
                /* Longword virtual access */
3113
                goto invalid_opc;
3114
            case 0x5:
3115
                /* Quadword virtual access */
3116
                goto invalid_opc;
3117
            case 0x6:
3118
                /* Invalid */
3119
                goto invalid_opc;
3120
            case 0x7:
3121
                /* Invalid */
3122
                goto invalid_opc;
3123
            case 0x8:
3124
                /* Invalid */
3125
                goto invalid_opc;
3126
            case 0x9:
3127
                /* Invalid */
3128
                goto invalid_opc;
3129
            case 0xA:
3130
                /* Invalid */
3131
                goto invalid_opc;
3132
            case 0xB:
3133
                /* Invalid */
3134
                goto invalid_opc;
3135
            case 0xC:
3136
                /* Longword virtual access with alternate access mode */
3137
                goto invalid_opc;
3138
            case 0xD:
3139
                /* Quadword virtual access with alternate access mode */
3140
                goto invalid_opc;
3141
            case 0xE:
3142
                /* Invalid */
3143
                goto invalid_opc;
3144
            case 0xF:
3145
                /* Invalid */
3146
                goto invalid_opc;
3147
            }
3148
            if (ra == 31)
3149
                tcg_temp_free(val);
3150
            tcg_temp_free(addr);
3151
            break;
3152
        }
3153
#endif
3154
        goto invalid_opc;
3155
    case 0x20:
3156
        /* LDF */
3157
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
3158
        break;
3159
    case 0x21:
3160
        /* LDG */
3161
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
3162
        break;
3163
    case 0x22:
3164
        /* LDS */
3165
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
3166
        break;
3167
    case 0x23:
3168
        /* LDT */
3169
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
3170
        break;
3171
    case 0x24:
3172
        /* STF */
3173
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
3174
        break;
3175
    case 0x25:
3176
        /* STG */
3177
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
3178
        break;
3179
    case 0x26:
3180
        /* STS */
3181
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
3182
        break;
3183
    case 0x27:
3184
        /* STT */
3185
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
3186
        break;
3187
    case 0x28:
3188
        /* LDL */
3189
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
3190
        break;
3191
    case 0x29:
3192
        /* LDQ */
3193
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
3194
        break;
3195
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        break;
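    /* Branch-format instructions: the target is the PC of the next
       instruction plus 4 * sext(disp21).  BR and BSR both resolve to
       gen_bdirect, which also writes that return PC to ra; the two
       opcodes differ only as branch-prediction hints, so no separate
       helper is needed.  */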
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
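    /* The final gen_bcond argument is a low-bit mask flag: 1 makes
       the comparison test only bit 0 of ra (BLBC/BLBS), while 0
       compares the full register against zero.  */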
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}

static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.env = env;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternatively we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

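    /* Translate one guest instruction at a time until translate_one
       asks to stop (any ExitStatus other than NO_EXIT), or we cross a
       page boundary, are single-stepping, or exhaust the op buffer or
       instruction budget.  */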
    gen_icount_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                || gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

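    /* Close out any pending I/O instrumentation, then emit the TB
       epilogue that matches how translation stopped (see the
       ExitStatus values).  */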
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (env->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

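    /* In the search_pc pass, pad out the instruction-start table so
       restore_state_to_opc() can map an opcode index back to a guest
       PC; otherwise record the final TB size and instruction count.  */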
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}

void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

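/* Table of recognized -cpu model names, mapping each to its IMPLVER
   and AMASK (implemented-extensions) values; e.g. "-cpu ev56" yields
   a 21164 core with the BWX byte/word extension.  Unknown names fall
   back to the ev67 defaults chosen in cpu_alpha_init() below.  */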
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4",   IMPLVER_2106x, 0 },
    { "ev5",   IMPLVER_21164, 0 },
    { "ev56",  IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064", IMPLVER_2106x, 0 },
    { "21164", IMPLVER_21164, 0 },
    { "21164a", IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};

CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    int implver, amask, i, max;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

#if defined (CONFIG_USER_ONLY)
    env->ps = PS_USER_MODE;
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
#endif
    env->lock_addr = -1;
    env->fen = 1;

    qemu_init_vcpu(env);
    return env;
}

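/* Called when a TB faults mid-execution: pc_pos indexes the gen_opc_pc
   table filled in by the search_pc translation pass above, giving the
   guest PC of the faulting instruction.  */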
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}