/*
 *  MIPS emulation helpers for qemu.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdlib.h>
#include "exec.h"

#include "host-utils.h"

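/* GETPC() evaluates to the host return address of the currently
   executing helper; do_restore_state() uses it to locate the enclosing
   TranslationBlock and resynchronize the guest CPU state before an
   exception is raised.  On s390 the top bit is masked off, presumably
   because 31-bit return addresses carry the addressing-mode bit there. */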
#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif

/*****************************************************************************/
/* Exceptions processing helpers */

void do_raise_exception_err (uint32_t exception, int error_code)
{
#if 1
    if (logfile && exception < 0x100)
        fprintf(logfile, "%s: %d %d\n", __func__, exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    T0 = 0;
    cpu_loop_exit();
}

void do_raise_exception (uint32_t exception)
{
    do_raise_exception_err(exception, 0);
}

void do_restore_state (void *pc_ptr)
{
  TranslationBlock *tb;
  unsigned long pc = (unsigned long) pc_ptr;

  tb = tb_find_pc (pc);
  cpu_restore_state (tb, env, pc, NULL);
}

void do_raise_exception_direct_err (uint32_t exception, int error_code)
{
    do_restore_state (GETPC ());
    do_raise_exception_err (exception, error_code);
}

void do_raise_exception_direct (uint32_t exception)
{
    do_raise_exception_direct_err (exception, 0);
}

#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
#if TARGET_LONG_BITS > HOST_LONG_BITS
/* Those might call libgcc functions.  */
void do_dsll (void)
{
    T0 = T0 << T1;
}

void do_dsll32 (void)
{
    T0 = T0 << (T1 + 32);
}

void do_dsra (void)
{
    T0 = (int64_t)T0 >> T1;
}

void do_dsra32 (void)
{
    T0 = (int64_t)T0 >> (T1 + 32);
}

void do_dsrl (void)
{
    T0 = T0 >> T1;
}

void do_dsrl32 (void)
{
    T0 = T0 >> (T1 + 32);
}

void do_drotr (void)
{
    target_ulong tmp;

    if (T1) {
       tmp = T0 << (0x40 - T1);
       T0 = (T0 >> T1) | tmp;
    }
}

void do_drotr32 (void)
{
    target_ulong tmp;

    if (T1) {
       tmp = T0 << (0x40 - (32 + T1));
       T0 = (T0 >> (32 + T1)) | tmp;
    }
}

void do_dsllv (void)
{
    T0 = T1 << (T0 & 0x3F);
}

void do_dsrav (void)
{
    T0 = (int64_t)T1 >> (T0 & 0x3F);
}

void do_dsrlv (void)
{
    T0 = T1 >> (T0 & 0x3F);
}

void do_drotrv (void)
{
    target_ulong tmp;

    T0 &= 0x3F;
    if (T0) {
       tmp = T1 << (0x40 - T0);
       T0 = (T1 >> T0) | tmp;
    } else
       T0 = T1;
}

void do_dclo (void)
{
    T0 = clo64(T0);
}

void do_dclz (void)
{
    T0 = clz64(T0);
}

#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
#endif /* TARGET_MIPSN32 || TARGET_MIPS64 */

/* 64 bits arithmetic for 32 bits hosts */
#if TARGET_LONG_BITS > HOST_LONG_BITS
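/* HI and LO hold the 64-bit result of the multiply/accumulate
   instructions.  get_HILO() reassembles them into a single uint64_t
   (HI in the upper half, LO in the lower half); set_HILO() splits a
   64-bit value back into the two sign-extended halves. */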
static always_inline uint64_t get_HILO (void)
{
    return (env->HI[0][env->current_tc] << 32) | (uint32_t)env->LO[0][env->current_tc];
}

static always_inline void set_HILO (uint64_t HILO)
{
    env->LO[0][env->current_tc] = (int32_t)HILO;
    env->HI[0][env->current_tc] = (int32_t)(HILO >> 32);
}

void do_mult (void)
{
    set_HILO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
}

void do_multu (void)
{
    set_HILO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
}

void do_madd (void)
{
    int64_t tmp;

    tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
    set_HILO((int64_t)get_HILO() + tmp);
}

void do_maddu (void)
{
    uint64_t tmp;

    tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
    set_HILO(get_HILO() + tmp);
}

void do_msub (void)
{
    int64_t tmp;

    tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
    set_HILO((int64_t)get_HILO() - tmp);
}

void do_msubu (void)
{
    uint64_t tmp;

    tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
    set_HILO(get_HILO() - tmp);
}
#endif

#if HOST_LONG_BITS < 64
void do_div (void)
{
    /* 64bit datatypes because we may see overflow/underflow. */
    if (T1 != 0) {
        env->LO[0][env->current_tc] = (int32_t)((int64_t)(int32_t)T0 / (int32_t)T1);
        env->HI[0][env->current_tc] = (int32_t)((int64_t)(int32_t)T0 % (int32_t)T1);
    }
}
#endif

#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
void do_ddiv (void)
{
    if (T1 != 0) {
        lldiv_t res = lldiv((int64_t)T0, (int64_t)T1);
        env->LO[0][env->current_tc] = res.quot;
        env->HI[0][env->current_tc] = res.rem;
    }
}

#if TARGET_LONG_BITS > HOST_LONG_BITS
void do_ddivu (void)
{
    if (T1 != 0) {
        env->LO[0][env->current_tc] = T0 / T1;
        env->HI[0][env->current_tc] = T0 % T1;
    }
}
#endif
#endif /* TARGET_MIPSN32 || TARGET_MIPS64 */

#if defined(CONFIG_USER_ONLY)
void do_mfc0_random (void)
{
    cpu_abort(env, "mfc0 random\n");
}

void do_mfc0_count (void)
{
    cpu_abort(env, "mfc0 count\n");
}

void cpu_mips_store_count(CPUState *env, uint32_t value)
{
    cpu_abort(env, "mtc0 count\n");
}

void cpu_mips_store_compare(CPUState *env, uint32_t value)
{
    cpu_abort(env, "mtc0 compare\n");
}

void cpu_mips_start_count(CPUState *env)
{
    cpu_abort(env, "start count\n");
}

void cpu_mips_stop_count(CPUState *env)
{
    cpu_abort(env, "stop count\n");
}

void cpu_mips_update_irq(CPUState *env)
{
    cpu_abort(env, "mtc0 status / mtc0 cause\n");
}

void do_mtc0_status_debug(uint32_t old, uint32_t val)
{
    cpu_abort(env, "mtc0 status debug\n");
}

void do_mtc0_status_irqraise_debug (void)
{
    cpu_abort(env, "mtc0 status irqraise debug\n");
}

void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    cpu_abort(env, "mips_tlb_flush\n");
}

#else

/* CP0 helpers */
void do_mfc0_random (void)
{
    T0 = (int32_t)cpu_mips_get_random(env);
}

void do_mfc0_count (void)
{
    T0 = (int32_t)cpu_mips_get_count(env);
}

void do_mtc0_status_debug(uint32_t old, uint32_t val)
{
    fprintf(logfile, "Status %08x (%08x) => %08x (%08x) Cause %08x",
            old, old & env->CP0_Cause & CP0Ca_IP_mask,
            val, val & env->CP0_Cause & CP0Ca_IP_mask,
            env->CP0_Cause);
    switch (env->hflags & MIPS_HFLAG_KSU) {
    case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
    case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
    case MIPS_HFLAG_KM: fputs("\n", logfile); break;
    default: cpu_abort(env, "Invalid MMU mode!\n"); break;
    }
}

void do_mtc0_status_irqraise_debug(void)
{
    fprintf(logfile, "Raise pending IRQs\n");
}

void fpu_handle_exception(void)
{
#ifdef CONFIG_SOFTFLOAT
    int flags = get_float_exception_flags(&env->fpu->fp_status);
    unsigned int cpuflags = 0, enable, cause = 0;

    enable = GET_FP_ENABLE(env->fpu->fcr31);

    /* determine current flags */
    if (flags & float_flag_invalid) {
        cpuflags |= FP_INVALID;
        cause |= FP_INVALID & enable;
    }
    if (flags & float_flag_divbyzero) {
        cpuflags |= FP_DIV0;
        cause |= FP_DIV0 & enable;
    }
    if (flags & float_flag_overflow) {
        cpuflags |= FP_OVERFLOW;
        cause |= FP_OVERFLOW & enable;
    }
    if (flags & float_flag_underflow) {
        cpuflags |= FP_UNDERFLOW;
        cause |= FP_UNDERFLOW & enable;
    }
    if (flags & float_flag_inexact) {
        cpuflags |= FP_INEXACT;
        cause |= FP_INEXACT & enable;
    }
    SET_FP_FLAGS(env->fpu->fcr31, cpuflags);
    SET_FP_CAUSE(env->fpu->fcr31, cause);
#else
    SET_FP_FLAGS(env->fpu->fcr31, 0);
    SET_FP_CAUSE(env->fpu->fcr31, 0);
#endif
}

/* TLB management */
void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush (env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards.  */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}

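/* Load TLB entry 'idx' from the CP0 registers.  EntryHi supplies the
   VPN2 and ASID, PageMask the page size, and each EntryLo word encodes
   G in bit 0, V in bit 1, D in bit 2, the cache coherency attribute in
   bits 5..3 and the PFN from bit 6 upwards. */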
static void r4k_fill_tlb (int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}

void r4k_do_tlbwi (void)
{
    /* Discard cached TLB entries.  We could avoid doing this if the
       tlbwi is just upgrading access permissions on the current entry;
       that might be a further win.  */
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);

    r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb, 0);
    r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);
}

void r4k_do_tlbwr (void)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(r);
}

void r4k_do_tlbp (void)
{
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint8_t ASID;
    int i;

    ASID = env->CP0_EntryHi & 0xFF;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match.  */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
            /* Check ASID, virtual page number & size */
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
                r4k_mips_tlb_flush_extra (env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}

void r4k_do_tlbr (void)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;

    ASID = env->CP0_EntryHi & 0xFF;
    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];

    /* If this will change the current ASID, flush qemu's TLB.  */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}

#endif /* !CONFIG_USER_ONLY */

void dump_ldst (const unsigned char *func)
{
    if (loglevel)
        fprintf(logfile, "%s => " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, T0, T1);
}

void dump_sc (void)
{
    if (loglevel) {
        fprintf(logfile, "%s " TARGET_FMT_lx " at " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", __func__,
                T1, T0, env->CP0_LLAddr);
    }
}

void debug_pre_eret (void)
{
    fprintf(logfile, "ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
            env->PC[env->current_tc], env->CP0_EPC);
    if (env->CP0_Status & (1 << CP0St_ERL))
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
    if (env->hflags & MIPS_HFLAG_DM)
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
    fputs("\n", logfile);
}

void debug_post_eret (void)
{
    fprintf(logfile, "  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
            env->PC[env->current_tc], env->CP0_EPC);
    if (env->CP0_Status & (1 << CP0St_ERL))
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
    if (env->hflags & MIPS_HFLAG_DM)
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
    switch (env->hflags & MIPS_HFLAG_KSU) {
    case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
    case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
    case MIPS_HFLAG_KM: fputs("\n", logfile); break;
    default: cpu_abort(env, "Invalid MMU mode!\n"); break;
    }
}

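/* do_pmon() appears to emulate a small subset of the PMON boot
   monitor's call interface (the function number arrives already
   doubled, hence the division by 2): cases 2/11 are character input
   stubs that return -1 in $v0, cases 3/12 print the character passed
   in $a0, and case 158 prints the string whose address is in $a0. */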
void do_pmon (int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->gpr[4][env->current_tc] == 0)
            env->gpr[2][env->current_tc] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        env->gpr[2][env->current_tc] = -1;
        break;
    case 3:
    case 12:
        printf("%c", (char)(env->gpr[4][env->current_tc] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            unsigned char *fmt = (void *)(unsigned long)env->gpr[4][env->current_tc];
            printf("%s", fmt);
        }
        break;
    }
}

#if !defined(CONFIG_USER_ONLY)

static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);

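/* Instantiate the softmmu load/store slow paths for each access size:
   SHIFT selects the size (1 << SHIFT bytes), MMUSUFFIX names the
   generated entry points, and ALIGNED_ONLY makes the templates call
   do_unaligned_access() on misaligned accesses. */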
#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (retaddr);
    do_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}

void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        do_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int unused)
{
    if (is_exec)
        do_raise_exception(EXCP_IBE);
    else
        do_raise_exception(EXCP_DBE);
}
#endif

/* Complex FPU operations which may need stack space. */

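/* Raw IEEE-754 bit patterns: the FP helpers below manipulate float32
   and float64 values through their integer representation, so
   FLOAT_ONE32 is 1.0f (0x3f800000), FLOAT_ONE64 is 1.0, FLOAT_TWO*
   are 2.0, and the QNAN/SNAN constants are the NaN patterns
   substituted for a result when an operation reports Invalid. */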
#define FLOAT_SIGN32 (1 << 31)
#define FLOAT_SIGN64 (1ULL << 63)
#define FLOAT_ONE32 (0x3f8 << 20)
#define FLOAT_ONE64 (0x3ffULL << 52)
#define FLOAT_TWO32 (1 << 30)
#define FLOAT_TWO64 (1ULL << 62)
#define FLOAT_QNAN32 0x7fbfffff
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
#define FLOAT_SNAN32 0x7fffffff
#define FLOAT_SNAN64 0x7fffffffffffffffULL

/* convert MIPS rounding mode in FCR31 to IEEE library */
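/* FCR31 bits 1:0 (the RM field) index this table: 0 = round to nearest
   even, 1 = toward zero, 2 = toward +infinity, 3 = toward -infinity. */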
unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};

#define RESTORE_ROUNDING_MODE \
    set_float_rounding_mode(ieee_rm[env->fpu->fcr31 & 3], &env->fpu->fp_status)

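/* CFC1/CTC1: control register 0 is the read-only FIR; registers 25
   (FCCR), 26 (FEXR) and 28 (FENR) are partial views of FCR31, the
   FCSR, which is accessed in full as register 31. */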
void do_cfc1 (int reg)
{
    switch (reg) {
    case 0:
        T0 = (int32_t)env->fpu->fcr0;
        break;
    case 25:
        T0 = ((env->fpu->fcr31 >> 24) & 0xfe) | ((env->fpu->fcr31 >> 23) & 0x1);
        break;
    case 26:
        T0 = env->fpu->fcr31 & 0x0003f07c;
        break;
    case 28:
        T0 = (env->fpu->fcr31 & 0x00000f83) | ((env->fpu->fcr31 >> 22) & 0x4);
        break;
    default:
        T0 = (int32_t)env->fpu->fcr31;
        break;
    }
}

void do_ctc1 (int reg)
{
    switch(reg) {
    case 25:
        if (T0 & 0xffffff00)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0x017fffff) | ((T0 & 0xfe) << 24) |
                     ((T0 & 0x1) << 23);
        break;
    case 26:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0xfffc0f83) | (T0 & 0x0003f07c);
        break;
    case 28:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0xfefff07c) | (T0 & 0x00000f83) |
                     ((T0 & 0x4) << 22);
        break;
    case 31:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = T0;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    set_float_exception_flags(0, &env->fpu->fp_status);
    if ((GET_FP_ENABLE(env->fpu->fcr31) | 0x20) & GET_FP_CAUSE(env->fpu->fcr31))
        do_raise_exception(EXCP_FPE);
}

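/* Translate between softfloat's exception flag bits and the MIPS FCSR
   bit layout (Invalid, DivByZero, Overflow, Underflow, Inexact); the
   shift amounts bridge the distance between the corresponding bit
   positions in the two encodings. */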
static always_inline char ieee_ex_to_mips(char xcpt)
{
    return (xcpt & float_flag_inexact) >> 5 |
           (xcpt & float_flag_underflow) >> 3 |
           (xcpt & float_flag_overflow) >> 1 |
           (xcpt & float_flag_divbyzero) << 1 |
           (xcpt & float_flag_invalid) << 4;
}

static always_inline char mips_ex_to_ieee(char xcpt)
{
    return (xcpt & FP_INEXACT) << 5 |
           (xcpt & FP_UNDERFLOW) << 3 |
           (xcpt & FP_OVERFLOW) << 1 |
           (xcpt & FP_DIV0) >> 1 |
           (xcpt & FP_INVALID) >> 4;
}

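/* Fold the softfloat flags accumulated since the last
   set_float_exception_flags(0, ...) into FCR31: the Cause field is
   rewritten every time; if one of the raised exceptions is enabled an
   FPE trap is taken, otherwise the sticky Flags bits accumulate. */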
static always_inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->fpu->fp_status));

    SET_FP_CAUSE(env->fpu->fcr31, tmp);
    if (GET_FP_ENABLE(env->fpu->fcr31) & tmp)
        do_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->fpu->fcr31, tmp);
}

#define FLOAT_OP(name, p) void do_float_##name##_##p(void)

FLOAT_OP(cvtd, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float32_to_float64(FST0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvtd, w)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = int32_to_float64(WT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvtd, l)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = int64_to_float64(DT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvtl, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(cvtl, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}

FLOAT_OP(cvtps, pw)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
    FSTH2 = int32_to_float32(WTH0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvtpw, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    WTH2 = float32_to_int32(FSTH0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(cvts, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float64_to_float32(FDT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvts, w)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvts, l)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int64_to_float32(DT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvts, pl)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = WT0;
    update_fcr31();
}
FLOAT_OP(cvts, pu)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = WTH0;
    update_fcr31();
}
FLOAT_OP(cvtw, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(cvtw, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(roundl, d)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(roundl, s)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(roundw, d)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(roundw, s)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(truncl, d)
{
    DT2 = float64_to_int64_round_to_zero(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(truncl, s)
{
    DT2 = float32_to_int64_round_to_zero(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(truncw, d)
{
    WT2 = float64_to_int32_round_to_zero(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(truncw, s)
{
    WT2 = float32_to_int32_round_to_zero(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(ceill, d)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(ceill, s)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(ceilw, d)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(ceilw, s)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(floorl, d)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(floorl, s)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(floorw, d)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(floorw, s)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

/* MIPS specific unary operations */
FLOAT_OP(recip, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(recip, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(rsqrt, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(rsqrt, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(recip1, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(recip1, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(recip1, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    FSTH2 = float32_div(FLOAT_ONE32, FSTH0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(rsqrt1, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(rsqrt1, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(rsqrt1, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FSTH2 = float32_sqrt(FSTH0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    FSTH2 = float32_div(FLOAT_ONE32, FSTH2, &env->fpu->fp_status);
    update_fcr31();
}

/* binary operations */
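/* FLOAT_BINOP instantiates add/sub/mul/div for the d, s and ps
   formats; when the operation reports Invalid the result is replaced
   by the default quiet NaN pattern. */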
#define FLOAT_BINOP(name) \
FLOAT_OP(name, d)         \
{                         \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    FDT2 = float64_ ## name (FDT0, FDT1, &env->fpu->fp_status);    \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID)                \
        FDT2 = FLOAT_QNAN64;                                       \
}                         \
FLOAT_OP(name, s)         \
{                         \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status);    \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID)                \
        FST2 = FLOAT_QNAN32;                                       \
}                         \
FLOAT_OP(name, ps)        \
{                         \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status);    \
    FSTH2 = float32_ ## name (FSTH0, FSTH1, &env->fpu->fp_status); \
    update_fcr31();       \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) {              \
        FST2 = FLOAT_QNAN32;                                       \
        FSTH2 = FLOAT_QNAN32;                                      \
    }                     \
}
FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP

/* MIPS specific binary operations */
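/* RECIP2 and RSQRT2 produce the correction term of a Newton-Raphson
   refinement step: recip2 computes -(fs * ft - 1.0) and rsqrt2
   computes -(fs * ft - 1.0) / 2.0, the final sign flip being done by
   XOR-ing the sign bit of the raw IEEE representation. */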
FLOAT_OP(recip2, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
    FDT2 = float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status) ^ FLOAT_SIGN64;
    update_fcr31();
}
FLOAT_OP(recip2, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    update_fcr31();
}
FLOAT_OP(recip2, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    FSTH2 = float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    update_fcr31();
}

FLOAT_OP(rsqrt2, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
    FDT2 = float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status);
    FDT2 = float64_div(FDT2, FLOAT_TWO64, &env->fpu->fp_status) ^ FLOAT_SIGN64;
    update_fcr31();
}
FLOAT_OP(rsqrt2, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
    FST2 = float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    update_fcr31();
}
FLOAT_OP(rsqrt2, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
    FSTH2 = float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status);
    FST2 = float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    FSTH2 = float32_div(FSTH2, FLOAT_TWO32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    update_fcr31();
}

FLOAT_OP(addr, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_add (FST0, FSTH0, &env->fpu->fp_status);
    FSTH2 = float32_add (FST1, FSTH1, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(mulr, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul (FST0, FSTH0, &env->fpu->fp_status);
    FSTH2 = float32_mul (FST1, FSTH1, &env->fpu->fp_status);
    update_fcr31();
}

/* compare operations */
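/* C.cond.fmt: each FOP_COND_* expansion evaluates the condition and
   sets or clears FP condition code 'cc' (and 'cc + 1' for the upper
   half of a paired-single); the cmpabs variants compare magnitudes by
   clearing the sign bits first. */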
#define FOP_COND_D(op, cond)                   \
void do_cmp_d_ ## op (long cc)                 \
{                                              \
    int c = cond;                              \
    update_fcr31();                            \
    if (c)                                     \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
}                                              \
void do_cmpabs_d_ ## op (long cc)              \
{                                              \
    int c;                                     \
    FDT0 &= ~FLOAT_SIGN64;                     \
    FDT1 &= ~FLOAT_SIGN64;                     \
    c = cond;                                  \
    update_fcr31();                            \
    if (c)                                     \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
}

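/* The 'sig' argument selects signaling comparison semantics: when set,
   any NaN operand raises the Invalid exception; when clear, only a
   signaling NaN does.  Either way the result is unordered whenever a
   NaN is present. */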
int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
{
    if (float64_is_signaling_nan(a) ||
        float64_is_signaling_nan(b) ||
        (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float64_is_nan(a) || float64_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(f,   (float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status), 0))
FOP_COND_D(un,  float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status))
FOP_COND_D(eq,  !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ueq, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status)  || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(olt, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ult, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status)  || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ole, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ule, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status)  || float64_le(FDT0, FDT1, &env->fpu->fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(sf,  (float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status), 0))
FOP_COND_D(ngle,float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status))
FOP_COND_D(seq, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ngl, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status)  || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(lt,  !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(nge, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status)  || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(le,  !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ngt, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status)  || float64_le(FDT0, FDT1, &env->fpu->fp_status))

#define FOP_COND_S(op, cond)                   \
void do_cmp_s_ ## op (long cc)                 \
{                                              \
    int c = cond;                              \
    update_fcr31();                            \
    if (c)                                     \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
}                                              \
void do_cmpabs_s_ ## op (long cc)              \
{                                              \
    int c;                                     \
    FST0 &= ~FLOAT_SIGN32;                     \
    FST1 &= ~FLOAT_SIGN32;                     \
    c = cond;                                  \
    update_fcr31();                            \
    if (c)                                     \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
}

flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
{
    if (float32_is_signaling_nan(a) ||
        float32_is_signaling_nan(b) ||
        (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float32_is_nan(a) || float32_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(f,   (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0))
FOP_COND_S(un,  float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status))
FOP_COND_S(eq,  !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)  || float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)  || float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)  || float32_le(FST0, FST1, &env->fpu->fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(sf,  (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0))
FOP_COND_S(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status))
FOP_COND_S(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)  || float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(lt,  !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)  || float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(le,  !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)  || float32_le(FST0, FST1, &env->fpu->fp_status))

#define FOP_COND_PS(op, condl, condh)          \
void do_cmp_ps_ ## op (long cc)                \
{                                              \
    int cl = condl;                            \
    int ch = condh;                            \
    update_fcr31();                            \
    if (cl)                                    \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
    if (ch)                                    \
        SET_FP_COND(cc + 1, env->fpu);         \
    else                                       \
        CLEAR_FP_COND(cc + 1, env->fpu);       \
}                                              \
void do_cmpabs_ps_ ## op (long cc)             \
{                                              \
    int cl, ch;                                \
    FST0 &= ~FLOAT_SIGN32;                     \
    FSTH0 &= ~FLOAT_SIGN32;                    \
    FST1 &= ~FLOAT_SIGN32;                     \
    FSTH1 &= ~FLOAT_SIGN32;                    \
    cl = condl;                                \
    ch = condh;                                \
    update_fcr31();                            \
    if (cl)                                    \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
    if (ch)                                    \
        SET_FP_COND(cc + 1, env->fpu);         \
    else                                       \
        CLEAR_FP_COND(cc + 1, env->fpu);       \
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(f,   (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0),
                 (float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status), 0))
FOP_COND_PS(un,  float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status),
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status))
FOP_COND_PS(eq,  !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)   && float32_eq(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)    || float32_eq(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)   && float32_lt(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)    || float32_lt(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)   && float32_le(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)    || float32_le(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(sf,  (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0),
                 (float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status), 0))
FOP_COND_PS(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status),
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status))
FOP_COND_PS(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)   && float32_eq(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)    || float32_eq(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(lt,  !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)   && float32_lt(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)    || float32_lt(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(le,  !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)   && float32_le(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)    || float32_le(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))