/*
 *  MIPS emulation helpers for qemu.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdlib.h>
#include "exec.h"

#define GETPC() (__builtin_return_address(0))

/*****************************************************************************/
/* Exceptions processing helpers */

void do_raise_exception_err (uint32_t exception, int error_code)
{
#if 1
    if (logfile && exception < 0x100)
        fprintf(logfile, "%s: %d %d\n", __func__, exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    T0 = 0;
    cpu_loop_exit();
}

void do_raise_exception (uint32_t exception)
{
    do_raise_exception_err(exception, 0);
}

void do_restore_state (void *pc_ptr)
{
  TranslationBlock *tb;
  unsigned long pc = (unsigned long) pc_ptr;

  tb = tb_find_pc (pc);
  cpu_restore_state (tb, env, pc, NULL);
}

void do_raise_exception_direct_err (uint32_t exception, int error_code)
{
    do_restore_state (GETPC ());
    do_raise_exception_err (exception, error_code);
}

void do_raise_exception_direct (uint32_t exception)
{
    do_raise_exception_direct_err (exception, 0);
}

#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
#if TARGET_LONG_BITS > HOST_LONG_BITS
/* Those might call libgcc functions.  */
void do_dsll (void)
{
    T0 = T0 << T1;
}

void do_dsll32 (void)
{
    T0 = T0 << (T1 + 32);
}

void do_dsra (void)
{
    T0 = (int64_t)T0 >> T1;
}

void do_dsra32 (void)
{
    T0 = (int64_t)T0 >> (T1 + 32);
}

void do_dsrl (void)
{
    T0 = T0 >> T1;
}

void do_dsrl32 (void)
{
    T0 = T0 >> (T1 + 32);
}

void do_drotr (void)
{
    target_ulong tmp;

    if (T1) {
       tmp = T0 << (0x40 - T1);
       T0 = (T0 >> T1) | tmp;
    }
}

void do_drotr32 (void)
{
    target_ulong tmp;

    if (T1) {
       tmp = T0 << (0x40 - (32 + T1));
       T0 = (T0 >> (32 + T1)) | tmp;
    }
}

void do_dsllv (void)
{
    T0 = T1 << (T0 & 0x3F);
}

void do_dsrav (void)
{
    T0 = (int64_t)T1 >> (T0 & 0x3F);
}

void do_dsrlv (void)
{
    T0 = T1 >> (T0 & 0x3F);
}

void do_drotrv (void)
{
    target_ulong tmp;

    T0 &= 0x3F;
    if (T0) {
       tmp = T1 << (0x40 - T0);
       T0 = (T1 >> T0) | tmp;
    } else
       T0 = T1;
}
#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
#endif /* TARGET_MIPSN32 || TARGET_MIPS64 */

/* 64 bits arithmetic for 32 bits hosts */
#if TARGET_LONG_BITS > HOST_LONG_BITS
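/* On 32-bit hosts the 64-bit HI/LO accumulator of the current thread
   context is accessed through the helpers below: get_HILO() concatenates
   HI (upper half) and LO (lower half), set_HILO() splits a 64-bit result
   back, sign-extending each 32-bit half. */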
static always_inline uint64_t get_HILO (void)
{
    return (env->HI[0][env->current_tc] << 32) | (uint32_t)env->LO[0][env->current_tc];
}

static always_inline void set_HILO (uint64_t HILO)
{
    env->LO[0][env->current_tc] = (int32_t)HILO;
    env->HI[0][env->current_tc] = (int32_t)(HILO >> 32);
}

void do_mult (void)
{
    set_HILO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
}

void do_multu (void)
{
    set_HILO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
}

void do_madd (void)
{
    int64_t tmp;

    tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
    set_HILO((int64_t)get_HILO() + tmp);
}

void do_maddu (void)
{
    uint64_t tmp;

    tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
    set_HILO(get_HILO() + tmp);
}

void do_msub (void)
{
    int64_t tmp;

    tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
    set_HILO((int64_t)get_HILO() - tmp);
}

void do_msubu (void)
{
    uint64_t tmp;

    tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
    set_HILO(get_HILO() - tmp);
}
#endif

#if HOST_LONG_BITS < 64
void do_div (void)
{
    /* 64bit datatypes because we may see overflow/underflow. */
    if (T1 != 0) {
        env->LO[0][env->current_tc] = (int32_t)((int64_t)(int32_t)T0 / (int32_t)T1);
        env->HI[0][env->current_tc] = (int32_t)((int64_t)(int32_t)T0 % (int32_t)T1);
    }
}
#endif

#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
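/* Note: lldiv() yields the 64-bit quotient and remainder in a single call;
   it is presumably the reason <stdlib.h> is included at the top of this file. */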
void do_ddiv (void)
{
    if (T1 != 0) {
        lldiv_t res = lldiv((int64_t)T0, (int64_t)T1);
        env->LO[0][env->current_tc] = res.quot;
        env->HI[0][env->current_tc] = res.rem;
    }
}

#if TARGET_LONG_BITS > HOST_LONG_BITS
void do_ddivu (void)
{
    if (T1 != 0) {
        env->LO[0][env->current_tc] = T0 / T1;
        env->HI[0][env->current_tc] = T0 % T1;
    }
}
#endif
#endif /* TARGET_MIPSN32 || TARGET_MIPS64 */

#if defined(CONFIG_USER_ONLY)
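/* User-mode emulation has no CP0 timer, interrupt logic or TLB; these
   hooks should never be reached, so they simply abort. */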
void do_mfc0_random (void)
{
    cpu_abort(env, "mfc0 random\n");
}

void do_mfc0_count (void)
{
    cpu_abort(env, "mfc0 count\n");
}

void cpu_mips_store_count(CPUState *env, uint32_t value)
{
    cpu_abort(env, "mtc0 count\n");
}

void cpu_mips_store_compare(CPUState *env, uint32_t value)
{
    cpu_abort(env, "mtc0 compare\n");
}

void cpu_mips_start_count(CPUState *env)
{
    cpu_abort(env, "start count\n");
}

void cpu_mips_stop_count(CPUState *env)
{
    cpu_abort(env, "stop count\n");
}

void cpu_mips_update_irq(CPUState *env)
{
    cpu_abort(env, "mtc0 status / mtc0 cause\n");
}

void do_mtc0_status_debug(uint32_t old, uint32_t val)
{
    cpu_abort(env, "mtc0 status debug\n");
}

void do_mtc0_status_irqraise_debug (void)
{
    cpu_abort(env, "mtc0 status irqraise debug\n");
}

void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    cpu_abort(env, "mips_tlb_flush\n");
}

#else

/* CP0 helpers */
void do_mfc0_random (void)
{
    T0 = (int32_t)cpu_mips_get_random(env);
}

void do_mfc0_count (void)
{
    T0 = (int32_t)cpu_mips_get_count(env);
}

void do_mtc0_status_debug(uint32_t old, uint32_t val)
{
    fprintf(logfile, "Status %08x (%08x) => %08x (%08x) Cause %08x",
            old, old & env->CP0_Cause & CP0Ca_IP_mask,
            val, val & env->CP0_Cause & CP0Ca_IP_mask,
            env->CP0_Cause);
    (env->hflags & MIPS_HFLAG_UM) ? fputs(", UM\n", logfile)
                                  : fputs("\n", logfile);
}

void do_mtc0_status_irqraise_debug(void)
{
    fprintf(logfile, "Raise pending IRQs\n");
}

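/* Fold the exception flags accumulated by softfloat into the FCSR: the
   Flags field gets every raised exception, the Cause field only those
   whose Enable bits are set.  Without softfloat both fields are cleared. */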
void fpu_handle_exception(void)
{
#ifdef CONFIG_SOFTFLOAT
    int flags = get_float_exception_flags(&env->fpu->fp_status);
    unsigned int cpuflags = 0, enable, cause = 0;

    enable = GET_FP_ENABLE(env->fpu->fcr31);

    /* determine current flags */
    if (flags & float_flag_invalid) {
        cpuflags |= FP_INVALID;
        cause |= FP_INVALID & enable;
    }
    if (flags & float_flag_divbyzero) {
        cpuflags |= FP_DIV0;
        cause |= FP_DIV0 & enable;
    }
    if (flags & float_flag_overflow) {
        cpuflags |= FP_OVERFLOW;
        cause |= FP_OVERFLOW & enable;
    }
    if (flags & float_flag_underflow) {
        cpuflags |= FP_UNDERFLOW;
        cause |= FP_UNDERFLOW & enable;
    }
    if (flags & float_flag_inexact) {
        cpuflags |= FP_INEXACT;
        cause |= FP_INEXACT & enable;
    }
    SET_FP_FLAGS(env->fpu->fcr31, cpuflags);
    SET_FP_CAUSE(env->fpu->fcr31, cause);
#else
    SET_FP_FLAGS(env->fpu->fcr31, 0);
    SET_FP_CAUSE(env->fpu->fcr31, 0);
#endif
}

/* TLB management */
void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush (env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards.  */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}

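/* Load CP0 EntryHi/PageMask/EntryLo0/EntryLo1 into TLB entry 'idx': VPN2
   and ASID come from EntryHi, the global bit is the AND of both EntryLo G
   bits, and each page half gets its own V/D/C attributes and PFN. */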
static void r4k_fill_tlb (int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}

void r4k_do_tlbwi (void)
{
    /* Discard cached TLB entries.  We could avoid doing this if the
       tlbwi is just upgrading access permissions on the current entry;
       that might be a further win.  */
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);

    r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb, 0);
    r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);
}

void r4k_do_tlbwr (void)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(r);
}

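/* TLBP: probe the TLB for an entry matching EntryHi (ASID and VPN, masked
   by the per-entry PageMask).  On a hit CP0_Index is set to the matching
   entry; on a miss bit 31 (the P bit) is set instead and any matching
   shadow entry is discarded. */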
void r4k_do_tlbp (void)
{
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint8_t ASID;
    int i;

    ASID = env->CP0_EntryHi & 0xFF;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match.  */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
            /* Check ASID, virtual page number & size */
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
                r4k_mips_tlb_flush_extra (env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}

void r4k_do_tlbr (void)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;

    ASID = env->CP0_EntryHi & 0xFF;
    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];

    /* If this will change the current ASID, flush qemu's TLB.  */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}

#endif /* !CONFIG_USER_ONLY */

void dump_ldst (const unsigned char *func)
{
    if (loglevel)
        fprintf(logfile, "%s => " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, T0, T1);
}

void dump_sc (void)
{
    if (loglevel) {
        fprintf(logfile, "%s " TARGET_FMT_lx " at " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", __func__,
                T1, T0, env->CP0_LLAddr);
    }
}

void debug_pre_eret (void)
{
    fprintf(logfile, "ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
            env->PC[env->current_tc], env->CP0_EPC);
    if (env->CP0_Status & (1 << CP0St_ERL))
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
    if (env->hflags & MIPS_HFLAG_DM)
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
    fputs("\n", logfile);
}

void debug_post_eret (void)
{
    fprintf(logfile, "  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
            env->PC[env->current_tc], env->CP0_EPC);
    if (env->CP0_Status & (1 << CP0St_ERL))
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
    if (env->hflags & MIPS_HFLAG_DM)
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
    if (env->hflags & MIPS_HFLAG_UM)
        fputs(", UM\n", logfile);
    else
        fputs("\n", logfile);
}

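/* Minimal handling of PMON firmware calls: the raw call number is halved,
   console output calls (3 and 12 print a character, 158 a string) go to
   stdout, and the input calls (2 and 11) just return -1. */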
void do_pmon (int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->gpr[4][env->current_tc] == 0)
            env->gpr[2][env->current_tc] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        env->gpr[2][env->current_tc] = -1;
        break;
    case 3:
    case 12:
        printf("%c", (char)(env->gpr[4][env->current_tc] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            unsigned char *fmt = (void *)(unsigned long)env->gpr[4][env->current_tc];
            printf("%s", fmt);
        }
        break;
    }
}

#if !defined(CONFIG_USER_ONLY)

static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);

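/* Instantiate the softmmu load/store helpers for 1-, 2-, 4- and 8-byte
   accesses (SHIFT 0..3).  ALIGNED_ONLY makes the generated helpers check
   alignment and call do_unaligned_access() below on misaligned addresses. */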
#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (retaddr);
    do_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}

void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        do_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int unused)
{
    if (is_exec)
        do_raise_exception(EXCP_IBE);
    else
        do_raise_exception(EXCP_DBE);
}
#endif

/* Complex FPU operations which may need stack space. */

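/* Raw IEEE 754 bit patterns used by the helpers below: sign-bit masks,
   the single/double constants 1.0 and 2.0, and the NaN patterns that are
   substituted for invalid or out-of-range results. */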
#define FLOAT_SIGN32 (1 << 31)
#define FLOAT_SIGN64 (1ULL << 63)
#define FLOAT_ONE32 (0x3f8 << 20)
#define FLOAT_ONE64 (0x3ffULL << 52)
#define FLOAT_TWO32 (1 << 30)
#define FLOAT_TWO64 (1ULL << 62)
#define FLOAT_QNAN32 0x7fbfffff
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
#define FLOAT_SNAN32 0x7fffffff
#define FLOAT_SNAN64 0x7fffffffffffffffULL

/* convert MIPS rounding mode in FCR31 to IEEE library */
unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};

#define RESTORE_ROUNDING_MODE \
    set_float_rounding_mode(ieee_rm[env->fpu->fcr31 & 3], &env->fpu->fp_status)

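/* CFC1/CTC1 control register map: 0 is the read-only FIR (fcr0); 25, 26
   and 28 are the FCCR, FEXR and FENR views packing subsets of the FCSR
   bits; 31 (and, for reads, any other number) is the full FCSR (fcr31).
   A write re-applies the rounding mode and raises an FP exception if an
   enabled cause bit is set. */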
void do_cfc1 (int reg)
{
    switch (reg) {
    case 0:
        T0 = (int32_t)env->fpu->fcr0;
        break;
    case 25:
        T0 = ((env->fpu->fcr31 >> 24) & 0xfe) | ((env->fpu->fcr31 >> 23) & 0x1);
        break;
    case 26:
        T0 = env->fpu->fcr31 & 0x0003f07c;
        break;
    case 28:
        T0 = (env->fpu->fcr31 & 0x00000f83) | ((env->fpu->fcr31 >> 22) & 0x4);
        break;
    default:
        T0 = (int32_t)env->fpu->fcr31;
        break;
    }
}

void do_ctc1 (int reg)
{
    switch(reg) {
    case 25:
        if (T0 & 0xffffff00)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0x017fffff) | ((T0 & 0xfe) << 24) |
                     ((T0 & 0x1) << 23);
        break;
    case 26:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0xfffc0f83) | (T0 & 0x0003f07c);
        break;
    case 28:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0xfefff07c) | (T0 & 0x00000f83) |
                     ((T0 & 0x4) << 22);
        break;
    case 31:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = T0;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    set_float_exception_flags(0, &env->fpu->fp_status);
    if ((GET_FP_ENABLE(env->fpu->fcr31) | 0x20) & GET_FP_CAUSE(env->fpu->fcr31))
        do_raise_exception(EXCP_FPE);
}

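/* ieee_ex_to_mips()/mips_ex_to_ieee() shuffle exception bits between
   softfloat's float_flag_* layout and the MIPS FCSR field layout.
   update_fcr31() latches the accumulated exceptions into the Cause field
   and either raises EXCP_FPE (if enabled) or ORs them into the sticky
   Flags field. */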
static always_inline char ieee_ex_to_mips(char xcpt)
{
    return (xcpt & float_flag_inexact) >> 5 |
           (xcpt & float_flag_underflow) >> 3 |
           (xcpt & float_flag_overflow) >> 1 |
           (xcpt & float_flag_divbyzero) << 1 |
           (xcpt & float_flag_invalid) << 4;
}

static always_inline char mips_ex_to_ieee(char xcpt)
{
    return (xcpt & FP_INEXACT) << 5 |
           (xcpt & FP_UNDERFLOW) << 3 |
           (xcpt & FP_OVERFLOW) << 1 |
           (xcpt & FP_DIV0) >> 1 |
           (xcpt & FP_INVALID) >> 4;
}

static always_inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->fpu->fp_status));

    SET_FP_CAUSE(env->fpu->fcr31, tmp);
    if (GET_FP_ENABLE(env->fpu->fcr31) & tmp)
        do_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->fpu->fcr31, tmp);
}

#define FLOAT_OP(name, p) void do_float_##name##_##p(void)

FLOAT_OP(cvtd, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float32_to_float64(FST0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvtd, w)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = int32_to_float64(WT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvtd, l)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = int64_to_float64(DT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvtl, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(cvtl, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}

FLOAT_OP(cvtps, pw)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
    FSTH2 = int32_to_float32(WTH0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvtpw, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    WTH2 = float32_to_int32(FSTH0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(cvts, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float64_to_float32(FDT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvts, w)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvts, l)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int64_to_float32(DT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvts, pl)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = WT0;
    update_fcr31();
}
FLOAT_OP(cvts, pu)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = WTH0;
    update_fcr31();
}
FLOAT_OP(cvtw, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(cvtw, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

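/* ROUND/CEIL/FLOOR temporarily force the corresponding rounding mode and
   then restore the one programmed in FCR31; TRUNC uses the dedicated
   round-to-zero conversions.  On overflow or an invalid operand the result
   is replaced by FLOAT_SNAN32/64, i.e. the largest positive integer. */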
FLOAT_OP(roundl, d)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(roundl, s)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(roundw, d)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(roundw, s)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(truncl, d)
{
    DT2 = float64_to_int64_round_to_zero(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(truncl, s)
{
    DT2 = float32_to_int64_round_to_zero(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(truncw, d)
{
    WT2 = float64_to_int32_round_to_zero(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(truncw, s)
{
    WT2 = float32_to_int32_round_to_zero(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(ceill, d)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(ceill, s)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(ceilw, d)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(ceilw, s)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(floorl, d)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(floorl, s)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(floorw, d)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(floorw, s)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

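/* Note: RECIP/RSQRT and the 1/2-step variants below are implemented with
   full-precision IEEE divide and square root, not the reduced-precision
   estimates real hardware may return. */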
/* MIPS specific unary operations */
FLOAT_OP(recip, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(recip, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(rsqrt, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(rsqrt, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(recip1, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(recip1, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(recip1, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    FSTH2 = float32_div(FLOAT_ONE32, FSTH0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(rsqrt1, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(rsqrt1, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(rsqrt1, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FSTH2 = float32_sqrt(FSTH0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    FSTH2 = float32_div(FLOAT_ONE32, FSTH2, &env->fpu->fp_status);
    update_fcr31();
}

/* binary operations */
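/* FLOAT_BINOP(x) expands to the do_float_x_d, do_float_x_s and
   do_float_x_ps helpers; when the operation raises the Invalid flag the
   result is overwritten with the default quiet NaN. */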
#define FLOAT_BINOP(name) \
FLOAT_OP(name, d)         \
{                         \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    FDT2 = float64_ ## name (FDT0, FDT1, &env->fpu->fp_status);    \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID)                \
        FDT2 = FLOAT_QNAN64;                                       \
}                         \
FLOAT_OP(name, s)         \
{                         \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status);    \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID)                \
        FST2 = FLOAT_QNAN32;                                       \
}                         \
FLOAT_OP(name, ps)        \
{                         \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status);    \
    FSTH2 = float32_ ## name (FSTH0, FSTH1, &env->fpu->fp_status); \
    update_fcr31();       \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) {              \
        FST2 = FLOAT_QNAN32;                                       \
        FSTH2 = FLOAT_QNAN32;                                      \
    }                     \
}
FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP

/* MIPS specific binary operations */
FLOAT_OP(recip2, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
    FDT2 = float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status) ^ FLOAT_SIGN64;
    update_fcr31();
}
FLOAT_OP(recip2, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    update_fcr31();
}
FLOAT_OP(recip2, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    FSTH2 = float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    update_fcr31();
}

FLOAT_OP(rsqrt2, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
    FDT2 = float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status);
    FDT2 = float64_div(FDT2, FLOAT_TWO64, &env->fpu->fp_status) ^ FLOAT_SIGN64;
    update_fcr31();
}
FLOAT_OP(rsqrt2, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
    FST2 = float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    update_fcr31();
}
FLOAT_OP(rsqrt2, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
    FSTH2 = float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status);
    FST2 = float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    FSTH2 = float32_div(FSTH2, FLOAT_TWO32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    update_fcr31();
}

FLOAT_OP(addr, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_add (FST0, FSTH0, &env->fpu->fp_status);
    FSTH2 = float32_add (FST1, FSTH1, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(mulr, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul (FST0, FSTH0, &env->fpu->fp_status);
    FSTH2 = float32_mul (FST1, FSTH1, &env->fpu->fp_status);
    update_fcr31();
}

/* compare operations */
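/* Each FOP_COND_*(op, cond) defines do_cmp_*_op and do_cmpabs_*_op.  Both
   evaluate 'cond' and set or clear condition code 'cc' in the FPU state;
   the "abs" variants clear the sign bits first so magnitudes are compared.
   The _ps variants set two adjacent condition codes, one per half. */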
#define FOP_COND_D(op, cond)                   \
void do_cmp_d_ ## op (long cc)                 \
{                                              \
    int c = cond;                              \
    update_fcr31();                            \
    if (c)                                     \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
}                                              \
void do_cmpabs_d_ ## op (long cc)              \
{                                              \
    int c;                                     \
    FDT0 &= ~FLOAT_SIGN64;                     \
    FDT1 &= ~FLOAT_SIGN64;                     \
    c = cond;                                  \
    update_fcr31();                            \
    if (c)                                     \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
}

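/* Helper for the compare macros: returns 1 if either operand is a NaN.
   With sig != 0 (the signalling compares) any NaN raises the Invalid
   exception; with sig == 0 only signalling NaNs do. */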
int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
{
    if (float64_is_signaling_nan(a) ||
        float64_is_signaling_nan(b) ||
        (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float64_is_nan(a) || float64_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}

/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(f,   (float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status), 0))
FOP_COND_D(un,  float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status))
FOP_COND_D(eq,  !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ueq, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status)  || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(olt, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ult, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status)  || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ole, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ule, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status)  || float64_le(FDT0, FDT1, &env->fpu->fp_status))
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(sf,  (float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status), 0))
FOP_COND_D(ngle,float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status))
FOP_COND_D(seq, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ngl, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status)  || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(lt,  !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(nge, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status)  || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(le,  !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ngt, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status)  || float64_le(FDT0, FDT1, &env->fpu->fp_status))

#define FOP_COND_S(op, cond)                   \
void do_cmp_s_ ## op (long cc)                 \
{                                              \
    int c = cond;                              \
    update_fcr31();                            \
    if (c)                                     \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
}                                              \
void do_cmpabs_s_ ## op (long cc)              \
{                                              \
    int c;                                     \
    FST0 &= ~FLOAT_SIGN32;                     \
    FST1 &= ~FLOAT_SIGN32;                     \
    c = cond;                                  \
    update_fcr31();                            \
    if (c)                                     \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
}

flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
{
    if (float32_is_signaling_nan(a) ||
        float32_is_signaling_nan(b) ||
        (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float32_is_nan(a) || float32_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}

/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(f,   (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0))
FOP_COND_S(un,  float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status))
FOP_COND_S(eq,  !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)  || float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)  || float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)  || float32_le(FST0, FST1, &env->fpu->fp_status))
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(sf,  (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0))
FOP_COND_S(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status))
FOP_COND_S(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)  || float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(lt,  !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)  || float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(le,  !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)  || float32_le(FST0, FST1, &env->fpu->fp_status))

#define FOP_COND_PS(op, condl, condh)          \
void do_cmp_ps_ ## op (long cc)                \
{                                              \
    int cl = condl;                            \
    int ch = condh;                            \
    update_fcr31();                            \
    if (cl)                                    \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
    if (ch)                                    \
        SET_FP_COND(cc + 1, env->fpu);         \
    else                                       \
        CLEAR_FP_COND(cc + 1, env->fpu);       \
}                                              \
void do_cmpabs_ps_ ## op (long cc)             \
{                                              \
    int cl, ch;                                \
    FST0 &= ~FLOAT_SIGN32;                     \
    FSTH0 &= ~FLOAT_SIGN32;                    \
    FST1 &= ~FLOAT_SIGN32;                     \
    FSTH1 &= ~FLOAT_SIGN32;                    \
    cl = condl;                                \
    ch = condh;                                \
    update_fcr31();                            \
    if (cl)                                    \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
    if (ch)                                    \
        SET_FP_COND(cc + 1, env->fpu);         \
    else                                       \
        CLEAR_FP_COND(cc + 1, env->fpu);       \
}

/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(f,   (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0),
                 (float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status), 0))
FOP_COND_PS(un,  float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status),
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status))
FOP_COND_PS(eq,  !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)   && float32_eq(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)    || float32_eq(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)   && float32_lt(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)    || float32_lt(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)   && float32_le(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)    || float32_le(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(sf,  (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0),
                 (float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status), 0))
FOP_COND_PS(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status),
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status))
FOP_COND_PS(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)   && float32_eq(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)    || float32_eq(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(lt,  !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)   && float32_lt(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)    || float32_lt(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(le,  !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)   && float32_le(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)    || float32_le(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))