/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
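/* "ir0".."ir9" need 4 bytes each including the NUL, "ir10".."ir30" need 5;
   the "fir" names are each one byte longer.  */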
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"

static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
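
/* The VAX F and G and IEEE S floating-point formats are laid out
   differently in memory than in the register file, so loads and stores
   of these types go through conversion helpers.  */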
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
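
/* Generate a load of the requested width: the effective address is
   RB + disp16, optionally cleared to an 8-byte boundary (for LDQ_U).  */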
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Various other loads with ra $31 are
       forms of prefetch, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}

static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return NO_EXIT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movi pc, next
             addi tmp, pc, disp
             movcond pc, cond, 0, tmp, pc
           or
             setcond tmp, cond, 0
             movi pc, next
             neg tmp, tmp
             andi tmp, tmp, disp
             add pc, pc, tmp
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */
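/* The result in DEST compares against zero with COND exactly as the
   IEEE value in SRC would, treating -0.0 the same as +0.0.  */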

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but it's easier to optimize it as an integer
           comparison than to continue with the floating-point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
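
/* CMOVxx: copy RB (or a literal) into RC when RA compares against zero
   with COND; when MASK is set, only the low bit of RA is tested
   (CMOVLBS/CMOVLBC).  */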
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}

static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_tmp;
    int l1;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
    } else {
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}
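
/* Qualifier bits within the fn11 function field of floating-point
   operate-format instructions.  */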
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}

#define FARITH2(name)                                   \
static inline void glue(gen_f, name)(int rb, int rc)    \
{                                                       \
    if (unlikely(rc == 31)) {                           \
        return;                                         \
    }                                                   \
    if (rb != 31) {                                     \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
    } else {                                            \
        TCGv tmp = tcg_const_i64(0);                    \
        gen_helper_ ## name (cpu_fir[rc], tmp);         \
        tcg_temp_free(tmp);                             \
    }                                                   \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}

#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

    
1124
static inline uint64_t zapnot_mask(uint8_t lit)
1125
{
1126
    uint64_t mask = 0;
1127
    int i;
1128

    
1129
    for (i = 0; i < 8; ++i) {
1130
        if ((lit >> i) & 1)
1131
            mask |= 0xffull << (i * 8);
1132
    }
1133
    return mask;
1134
}
1135

    
1136
/* Implement zapnot with an immediate operand, which expands to some
1137
   form of immediate AND.  This is a basic building block in the
1138
   definition of many of the other byte manipulation instructions.  */
1139
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
1140
{
1141
    switch (lit) {
1142
    case 0x00:
1143
        tcg_gen_movi_i64(dest, 0);
1144
        break;
1145
    case 0x01:
1146
        tcg_gen_ext8u_i64(dest, src);
1147
        break;
1148
    case 0x03:
1149
        tcg_gen_ext16u_i64(dest, src);
1150
        break;
1151
    case 0x0f:
1152
        tcg_gen_ext32u_i64(dest, src);
1153
        break;
1154
    case 0xff:
1155
        tcg_gen_mov_i64(dest, src);
1156
        break;
1157
    default:
1158
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
1159
        break;
1160
    }
1161
}
1162

    
1163
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1164
{
1165
    if (unlikely(rc == 31))
1166
        return;
1167
    else if (unlikely(ra == 31))
1168
        tcg_gen_movi_i64(cpu_ir[rc], 0);
1169
    else if (islit)
1170
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
1171
    else
1172
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1173
}
1174

    
1175
static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1176
{
1177
    if (unlikely(rc == 31))
1178
        return;
1179
    else if (unlikely(ra == 31))
1180
        tcg_gen_movi_i64(cpu_ir[rc], 0);
1181
    else if (islit)
1182
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
1183
    else
1184
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1185
}
1186

    
1187

    
1188
/* EXTWH, EXTLH, EXTQH */
1189
static void gen_ext_h(int ra, int rb, int rc, int islit,
1190
                      uint8_t lit, uint8_t byte_mask)
1191
{
1192
    if (unlikely(rc == 31))
1193
        return;
1194
    else if (unlikely(ra == 31))
1195
        tcg_gen_movi_i64(cpu_ir[rc], 0);
1196
    else {
1197
        if (islit) {
1198
            lit = (64 - (lit & 7) * 8) & 0x3f;
1199
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1200
        } else {
1201
            TCGv tmp1 = tcg_temp_new();
1202
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1203
            tcg_gen_shli_i64(tmp1, tmp1, 3);
1204
            tcg_gen_neg_i64(tmp1, tmp1);
1205
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
1206
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
1207
            tcg_temp_free(tmp1);
1208
        }
1209
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1210
    }
1211
}
1212

    
1213
/* EXTBL, EXTWL, EXTLL, EXTQL */
1214
static void gen_ext_l(int ra, int rb, int rc, int islit,
1215
                      uint8_t lit, uint8_t byte_mask)
1216
{
1217
    if (unlikely(rc == 31))
1218
        return;
1219
    else if (unlikely(ra == 31))
1220
        tcg_gen_movi_i64(cpu_ir[rc], 0);
1221
    else {
1222
        if (islit) {
1223
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
1224
        } else {
1225
            TCGv tmp = tcg_temp_new();
1226
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1227
            tcg_gen_shli_i64(tmp, tmp, 3);
1228
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
1229
            tcg_temp_free(tmp);
1230
        }
1231
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1232
    }
1233
}
1234

    
1235
/* INSWH, INSLH, INSQH */
1236
static void gen_ins_h(int ra, int rb, int rc, int islit,
1237
                      uint8_t lit, uint8_t byte_mask)
1238
{
1239
    if (unlikely(rc == 31))
1240
        return;
1241
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
1242
        tcg_gen_movi_i64(cpu_ir[rc], 0);
1243
    else {
1244
        TCGv tmp = tcg_temp_new();
1245

    
1246
        /* The instruction description has us left-shift the byte mask
1247
           and extract bits <15:8> and apply that zap at the end.  This
1248
           is equivalent to simply performing the zap first and shifting
1249
           afterward.  */
1250
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1251

    
1252
        if (islit) {
1253
            /* Note that we have handled the lit==0 case above.  */
1254
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
1255
        } else {
1256
            TCGv shift = tcg_temp_new();
1257

    
1258
            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1259
               Do this portably by splitting the shift into two parts:
1260
               shift_count-1 and 1.  Arrange for the -1 by using
1261
               ones-complement instead of twos-complement in the negation:
1262
               ~((B & 7) * 8) & 63.  */
1263

    
1264
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1265
            tcg_gen_shli_i64(shift, shift, 3);
1266
            tcg_gen_not_i64(shift, shift);
1267
            tcg_gen_andi_i64(shift, shift, 0x3f);
1268

    
1269
            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
1270
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
1271
            tcg_temp_free(shift);
1272
        }
1273
        tcg_temp_free(tmp);
1274
    }
1275
}
1276

    
1277
/* INSBL, INSWL, INSLL, INSQL */
1278
static void gen_ins_l(int ra, int rb, int rc, int islit,
1279
                      uint8_t lit, uint8_t byte_mask)
1280
{
1281
    if (unlikely(rc == 31))
1282
        return;
1283
    else if (unlikely(ra == 31))
1284
        tcg_gen_movi_i64(cpu_ir[rc], 0);
1285
    else {
1286
        TCGv tmp = tcg_temp_new();
1287

    
1288
        /* The instruction description has us left-shift the byte mask
1289
           the same number of byte slots as the data and apply the zap
1290
           at the end.  This is equivalent to simply performing the zap
1291
           first and shifting afterward.  */
1292
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1293

    
1294
        if (islit) {
1295
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1296
        } else {
1297
            TCGv shift = tcg_temp_new();
1298
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1299
            tcg_gen_shli_i64(shift, shift, 3);
1300
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1301
            tcg_temp_free(shift);
1302
        }
1303
        tcg_temp_free(tmp);
1304
    }
1305
}
1306

    
1307
/* MSKWH, MSKLH, MSKQH */
1308
static void gen_msk_h(int ra, int rb, int rc, int islit,
1309
                      uint8_t lit, uint8_t byte_mask)
1310
{
1311
    if (unlikely(rc == 31))
1312
        return;
1313
    else if (unlikely(ra == 31))
1314
        tcg_gen_movi_i64(cpu_ir[rc], 0);
1315
    else if (islit) {
1316
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
1317
    } else {
1318
        TCGv shift = tcg_temp_new();
1319
        TCGv mask = tcg_temp_new();
1320

    
1321
        /* The instruction description is as above, where the byte_mask
1322
           is shifted left, and then we extract bits <15:8>.  This can be
1323
           emulated with a right-shift on the expanded byte mask.  This
1324
           requires extra care because for an input <2:0> == 0 we need a
1325
           shift of 64 bits in order to generate a zero.  This is done by
1326
           splitting the shift into two parts, the variable shift - 1
1327
           followed by a constant 1 shift.  The code we expand below is
1328
           equivalent to ~((B & 7) * 8) & 63.  */
1329

    
1330
        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1331
        tcg_gen_shli_i64(shift, shift, 3);
1332
        tcg_gen_not_i64(shift, shift);
1333
        tcg_gen_andi_i64(shift, shift, 0x3f);
1334
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1335
        tcg_gen_shr_i64(mask, mask, shift);
1336
        tcg_gen_shri_i64(mask, mask, 1);
1337

    
1338
        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1339

    
1340
        tcg_temp_free(mask);
1341
        tcg_temp_free(shift);
1342
    }
1343
}
1344

    
1345
/* MSKBL, MSKWL, MSKLL, MSKQL */
1346
static void gen_msk_l(int ra, int rb, int rc, int islit,
1347
                      uint8_t lit, uint8_t byte_mask)
1348
{
1349
    if (unlikely(rc == 31))
1350
        return;
1351
    else if (unlikely(ra == 31))
1352
        tcg_gen_movi_i64(cpu_ir[rc], 0);
1353
    else if (islit) {
1354
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1355
    } else {
1356
        TCGv shift = tcg_temp_new();
1357
        TCGv mask = tcg_temp_new();
1358

    
1359
        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1360
        tcg_gen_shli_i64(shift, shift, 3);
1361
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1362
        tcg_gen_shl_i64(mask, mask, shift);
1363

    
1364
        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1365

    
1366
        tcg_temp_free(mask);
1367
        tcg_temp_free(shift);
1368
    }
1369
}
1370

    
1371
/* Code to call arith3 helpers */
1372
#define ARITH3(name)                                                  \
1373
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1374
                                    uint8_t lit)                      \
1375
{                                                                     \
1376
    if (unlikely(rc == 31))                                           \
1377
        return;                                                       \
1378
                                                                      \
1379
    if (ra != 31) {                                                   \
1380
        if (islit) {                                                  \
1381
            TCGv tmp = tcg_const_i64(lit);                            \
1382
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
1383
            tcg_temp_free(tmp);                                       \
1384
        } else                                                        \
1385
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1386
    } else {                                                          \
1387
        TCGv tmp1 = tcg_const_i64(0);                                 \
1388
        if (islit) {                                                  \
1389
            TCGv tmp2 = tcg_const_i64(lit);                           \
1390
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
1391
            tcg_temp_free(tmp2);                                      \
1392
        } else                                                        \
1393
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
1394
        tcg_temp_free(tmp1);                                          \
1395
    }                                                                 \
1396
}
1397
ARITH3(cmpbge)
1398
ARITH3(addlv)
1399
ARITH3(sublv)
1400
ARITH3(addqv)
1401
ARITH3(subqv)
1402
ARITH3(umulh)
1403
ARITH3(mullv)
1404
ARITH3(mulqv)
1405
ARITH3(minub8)
1406
ARITH3(minsb8)
1407
ARITH3(minuw4)
1408
ARITH3(minsw4)
1409
ARITH3(maxub8)
1410
ARITH3(maxsb8)
1411
ARITH3(maxuw4)
1412
ARITH3(maxsw4)
1413
ARITH3(perr)
1414

    
1415
#define MVIOP2(name)                                    \
1416
static inline void glue(gen_, name)(int rb, int rc)     \
1417
{                                                       \
1418
    if (unlikely(rc == 31))                             \
1419
        return;                                         \
1420
    if (unlikely(rb == 31))                             \
1421
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
1422
    else                                                \
1423
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
1424
}
1425
MVIOP2(pklb)
1426
MVIOP2(pkwb)
1427
MVIOP2(unpkbl)
1428
MVIOP2(unpkbw)
1429

    
1430
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1431
                    int islit, uint8_t lit)
1432
{
1433
    TCGv va, vb;
1434

    
1435
    if (unlikely(rc == 31)) {
1436
        return;
1437
    }
1438

    
1439
    if (ra == 31) {
1440
        va = tcg_const_i64(0);
1441
    } else {
1442
        va = cpu_ir[ra];
1443
    }
1444
    if (islit) {
1445
        vb = tcg_const_i64(lit);
1446
    } else {
1447
        vb = cpu_ir[rb];
1448
    }
1449

    
1450
    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
1451

    
1452
    if (ra == 31) {
1453
        tcg_temp_free(va);
1454
    }
1455
    if (islit) {
1456
        tcg_temp_free(vb);
1457
    }
1458
}
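
/* RC and RS: RA receives the old value of the interrupt flag, which is
   then cleared or set according to SET.  */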
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));

            /* But make sure to store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                              offsetof(CPUState, cpu_index));
            break;

        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    return gen_invalid(ctx);
}

#ifndef CONFIG_USER_ONLY
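
/* Flags ORed into the env offset returned by cpu_pr_data, marking the
   processor register as a byte or 32-bit field rather than a full
   64-bit one.  */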
1569

    
1570
#define PR_BYTE         0x100000
1571
#define PR_LONG         0x200000
1572

    
1573
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);
    }
    return 0;
}

static void gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return;
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
}

static void gen_mtpr(int rb, int regno)
{
    TCGv tmp;
    int data;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    data = cpu_pr_data(regno);
    if (data != 0) {
        if (data & PR_BYTE) {
            tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
        } else if (data & PR_LONG) {
            tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_st_i64(tmp, cpu_env, data);
        }
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }
}
#endif /* !USER_ONLY */

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

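    /* Every Alpha instruction is 32 bits wide; the major formats lay out
       their fields as follows (bit 31 leftmost):

         Memory:   opc[31:26] ra[25:21] rb[20:16] disp16[15:0]
         Branch:   opc[31:26] ra[25:21] disp21[20:0]
         Operate:  opc[31:26] ra[25:21] rb[20:16] sbz[15:13] 0 fn7[11:5] rc[4:0]
         Op(lit):  opc[31:26] ra[25:21] lit[20:13]           1 fn7[11:5] rc[4:0]
         FP op:    opc[31:26] fa[25:21] fb[20:16] fn11[15:5] fc[4:0]
         PALcode:  opc[31:26] palcode[25:0]

       Bit 12 selects the 8-bit literal form of the operate format.  The
       sub-16-bit displacements are sign-extended by shifting them to the
       top of a 32-bit value and arithmetic-shifting back down.  */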
    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
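    /* Register 31 reads as zero, so an rb of 31 in the register form is
       folded into the literal path with a literal of 0.  real_islit keeps
       the encoded bit, since a few MVI instructions are valid only in
       the register form (see UNPKBW et al below).  */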
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);

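    /* Throughout the decoder, R31 and F31 read as zero and discard
       writes, so the "rc != 31" / "ra != 31" tests below let us skip or
       simplify the generated code when an operand is the zero register.  */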
    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, palcode);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
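        /* AMASK reports which architecture extensions are implemented:
           the result is the source operand with the bit for each
           implemented feature (BWX, FIX, CIX, MVI, ...) cleared.  */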
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;

                if (islit) {
                    tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
                } else {
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtf(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x0B:
            /* SQRTS */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrts(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        case 0x14:
            /* ITOFF */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtg(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x2B:
            /* SQRTT */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtt(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x1E:
            /* CVTDG */
#if 0 // TODO
            gen_fcvtdg(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2D:
            /* CVTGD */
#if 0 // TODO
            gen_fcvtgd(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
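            /* fn11 values 0x2AC and 0x6AC encode CVTST and CVTST/S; every
               other encoding with this fpfn is a CVTTS variant.  */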
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
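    /* Opcode 0x18: miscellaneous instructions.  The barrier and cache
       hints below can be no-ops here, since the code generated for a TB
       executes in order with no other emulated CPU running concurrently.  */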
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            if (ra != 31)
                gen_helper_load_pcc(cpu_ir[ra]);
            break;
        case 0xE000:
            /* RC */
            gen_rx(ra, 0);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            gen_rx(ra, 1);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            gen_mfpr(ra, insn & 0xffff);
            break;
        }
#endif
        goto invalid_opc;
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        if (rb != 31) {
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        } else {
            tcg_gen_movi_i64(cpu_pc, 0);
        }
        if (ra != 31) {
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr;

            if (ra == 31) {
                break;
            }

            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
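            /* Bits 15:12 of the instruction select the HW_LD variant:
               access size, physical vs. virtual, locked access, and
               alternate access mode, as annotated on each case below.  */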
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a)*/
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                    } else {
                        tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x30:
            /* CTPOP */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                    } else {
                        gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x31:
            /* PERR */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_perr(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x32:
            /* CTLZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                    } else {
                        gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x33:
            /* CTTZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                    } else {
                        gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x34:
            /* UNPKBW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbw(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x35:
            /* UNPKBL */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbl(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x36:
            /* PKWB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pkwb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x37:
            /* PKLB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pklb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x38:
            /* MINSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x39:
            /* MINSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3A:
            /* MINUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3B:
            /* MINUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3C:
            /* MAXUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3D:
            /* MAXUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3E:
            /* MAXSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3F:
            /* MAXSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
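        /* FTOIT/FTOIS (FIX extension) move raw bits from the FP register
           file to the integer file: FTOIT is a plain 64-bit copy, while
           FTOIS repacks the S-format value through the same conversion
           used for S-format stores.  */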
        case 0x70:
            /* FTOIT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            gen_mtpr(rb, insn & 0xffff);
            break;
        }
#endif
        goto invalid_opc;
    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            if (rb == 31) {
                /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
                   address from EXC_ADDR.  This turns out to be useful for our
                   emulation PALcode, so continue to accept it.  */
                TCGv tmp = tcg_temp_new();
                tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUState, exc_addr));
                gen_helper_hw_ret(tmp);
                tcg_temp_free(tmp);
            } else {
                gen_helper_hw_ret(cpu_ir[rb]);
            }
            ret = EXIT_PC_UPDATED;
            break;
        }
#endif
        goto invalid_opc;
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr, val;
            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            if (ra != 31)
                val = cpu_ir[ra];
            else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_phys(addr, val);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_phys(addr, val);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_phys(val, addr, val);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_phys(val, addr, val);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Invalid */
                goto invalid_opc;
            case 0x9:
                /* Invalid */
                goto invalid_opc;
            case 0xA:
                /* Invalid */
                goto invalid_opc;
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xE:
                /* Invalid */
                goto invalid_opc;
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
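    /* STx_C succeeds only if the lock established by the matching
       LDx_L is still intact; the architectural 0/1 success flag is
       written back to ra, and the TB may need to end afterward, hence
       the ExitStatus return from gen_store_conditional.  */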
    case 0x2E:
3202
        /* STL_C */
3203
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
3204
        break;
3205
    case 0x2F:
3206
        /* STQ_C */
3207
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
3208
        break;
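    /* Branch format instructions.  disp21 counts instructions and is
       sign-extended; the target is the updated PC plus disp21 * 4.  */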
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
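    /* Integer conditional branches.  The final argument to gen_bcond
       selects a test of only the low bit of ra (BLBC/BLBS) instead of
       the whole register.  */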
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}

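/* Translate a block of guest code starting at tb->pc.  When search_pc
   is nonzero we are retranslating an existing block in order to recover
   the guest PC corresponding to a host PC within it, filling in the
   gen_opc_* side tables consumed by restore_state_to_opc.  */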
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.env = env;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

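    /* The low bits of tb->cflags bound the number of insns allowed in
       this TB (used with icount); zero means no explicit limit.  */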
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
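        /* In search_pc mode, record the guest PC and insn count for
           each generated op so that restore_state_to_opc can map a host
           PC back to guest state.  */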
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                || gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

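    /* Close off the TB according to how translate_one ended it.  The
       goto_tb and noreturn cases have already emitted their exits;
       otherwise make sure cpu_pc is up to date before leaving the TB,
       raising EXCP_DEBUG instead when single-stepping.  */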
    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (env->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}

void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

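/* Known CPU models.  IMPLVER identifies the implementation family as
   reported by the implver insn; AMASK advertises which architecture
   extensions (BWX, FIX, CIX, MVI, ...) the model supports.  */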
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4",   IMPLVER_2106x, 0 },
    { "ev5",   IMPLVER_21164, 0 },
    { "ev56",  IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064", IMPLVER_2106x, 0 },
    { "21164", IMPLVER_21164, 0 },
    { "21164a", IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};

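/* Create and initialize an Alpha CPU.  Unrecognized model names fall
   back to an ev67-equivalent feature set.  */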
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    int implver, amask, i, max;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

#if defined (CONFIG_USER_ONLY)
    env->ps = PS_USER_MODE;
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
#endif
    env->lock_addr = -1;
    env->fen = 1;

    qemu_init_vcpu(env);
    return env;
}

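/* Recover the guest PC that the search_pc pass above recorded for
   opcode index pc_pos, when unwinding from an exception taken inside
   generated code.  */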
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}