Statistics
| Branch: | Revision:

root / target-mips / op_helper.c @ d26968ec

History | View | Annotate | Download (79.3 kB)

1
/*
2
 *  MIPS emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2004-2005 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include <stdlib.h>
21
#include "exec.h"
22

    
23
#include "host-utils.h"
24

    
25
/*****************************************************************************/
26
/* Exceptions processing helpers */
27

    
28
/* Record EXCEPTION and ERROR_CODE in the CPU state and longjmp back to
   the main execution loop via cpu_loop_exit(); does not return. */
void do_raise_exception_err (uint32_t exception, int error_code)
{
#if 1
    /* Debug trace; exceptions >= 0x100 are internal and not logged. */
    if (logfile && exception < 0x100)
        fprintf(logfile, "%s: %d %d\n", __func__, exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}
38

    
39
/* Raise EXCEPTION with a zero error code; does not return. */
void do_raise_exception (uint32_t exception)
{
    do_raise_exception_err(exception, 0);
}
43

    
44
/* Re-check for a deliverable hardware interrupt and raise it if one is
   pending.  An interrupt is deliverable only when neither EXL nor ERL
   is set, the CPU is not in debug mode, Status.IE is set, and at least
   one unmasked Cause.IP bit is pending. */
void do_interrupt_restart (void)
{
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM) &&
        (env->CP0_Status & (1 << CP0St_IE)) &&
        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
        /* Clear Cause.ExcCode before delivering the interrupt. */
        env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
        do_raise_exception(EXCP_EXT_INTERRUPT);
    }
}
55

    
56
/* Restore the guest CPU state from the translation block that contains
   host PC pc_ptr (used after a fault raised inside generated code).
   If no TB covers the address, the state is left untouched. */
void do_restore_state (void *pc_ptr)
{
    TranslationBlock *tb;
    unsigned long pc = (unsigned long) pc_ptr;

    tb = tb_find_pc (pc);
    if (tb) {
        cpu_restore_state (tb, env, pc, NULL);
    }
}
66

    
67
/* CLO: count leading ones in the low 32 bits of t0. */
target_ulong do_clo (target_ulong t0)
{
    return clo32(t0);
}
71

    
72
/* CLZ: count leading zeroes in the low 32 bits of t0. */
target_ulong do_clz (target_ulong t0)
{
    return clz32(t0);
}
76

    
77
#if defined(TARGET_MIPS64)
78
/* DCLO: count leading ones in the 64-bit value t0. */
target_ulong do_dclo (target_ulong t0)
{
    return clo64(t0);
}
82

    
83
/* DCLZ: count leading zeroes in the 64-bit value t0. */
target_ulong do_dclz (target_ulong t0)
{
    return clz64(t0);
}
87
#endif /* TARGET_MIPS64 */
88

    
89
/* 64 bits arithmetic for 32 bits hosts */
90
/* Return the HI:LO register pair of the active TC as one 64-bit value. */
static always_inline uint64_t get_HILO (void)
{
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
}
94

    
95
/* Split HILO into the active TC's HI (high word) and LO (low word),
   each sign-extended to target_ulong. */
static always_inline void set_HILO (uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)HILO;
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}
100

    
101
/* Split HILO into HI/LO; the HI word is also intended to be handed back
   through t0.
   NOTE(review): t0 is passed by value, so the assignment to it below is
   a dead store that never reaches the caller — callers that do
   "set_HIT0_LO(t0, x); return t0;" get their unmodified input back. */
static always_inline void set_HIT0_LO (target_ulong t0, uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    t0 = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}
106

    
107
/* Split HILO into HI/LO; the LO word is also intended to be handed back
   through t0.
   NOTE(review): t0 is passed by value, so the assignment to it below is
   a dead store that never reaches the caller — callers that do
   "set_HI_LOT0(t0, x); return t0;" get their unmodified input back. */
static always_inline void set_HI_LOT0 (target_ulong t0, uint64_t HILO)
{
    t0 = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}
112

    
113
#if TARGET_LONG_BITS > HOST_LONG_BITS
114
/* MADD: accumulate the signed 32x32->64 product of t0 and t1 into HI/LO. */
void do_madd (target_ulong t0, target_ulong t1)
{
    int64_t prod = (int64_t)(int32_t)t0 * (int64_t)(int32_t)t1;

    set_HILO((int64_t)get_HILO() + prod);
}
121

    
122
/* MADDU: accumulate the unsigned 32x32->64 product of t0 and t1 into HI/LO. */
void do_maddu (target_ulong t0, target_ulong t1)
{
    uint64_t prod = (uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1;

    set_HILO(get_HILO() + prod);
}
129

    
130
/* MSUB: subtract the signed 32x32->64 product of t0 and t1 from HI/LO. */
void do_msub (target_ulong t0, target_ulong t1)
{
    int64_t prod = (int64_t)(int32_t)t0 * (int64_t)(int32_t)t1;

    set_HILO((int64_t)get_HILO() - prod);
}
137

    
138
/* MSUBU: subtract the unsigned 32x32->64 product of t0 and t1 from HI/LO. */
void do_msubu (target_ulong t0, target_ulong t1)
{
    uint64_t prod = (uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1;

    set_HILO(get_HILO() - prod);
}
145
#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
146

    
147
/* Multiplication variants of the vr54xx. */
148
/* VR54xx MULS: HI/LO = -(signed t0 * t1); the result is the new LO word.
   Fixed: set_HI_LOT0's write to t0 is a dead store (t0 is passed by
   value), so "return t0" returned the unmodified input — compute the
   product locally and return the LO word directly. */
target_ulong do_muls (target_ulong t0, target_ulong t1)
{
    uint64_t hilo = 0 - (uint64_t)((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);

    set_HI_LOT0(t0, hilo);
    return (target_ulong)(int32_t)hilo;
}
154

    
155
/* VR54xx MULSU: HI/LO = -(unsigned t0 * t1); the result is the new LO
   word.  Fixed: set_HI_LOT0 cannot return through by-value t0 (dead
   store), so the LO word is computed and returned here. */
target_ulong do_mulsu (target_ulong t0, target_ulong t1)
{
    uint64_t hilo = 0 - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1);

    set_HI_LOT0(t0, hilo);
    return (target_ulong)(int32_t)hilo;
}
161

    
162
/* VR54xx MACC: HI/LO += signed t0 * t1; the result is the new LO word.
   Fixed: set_HI_LOT0 cannot return through by-value t0 (dead store),
   so the accumulated value is computed and its LO word returned here. */
target_ulong do_macc (target_ulong t0, target_ulong t1)
{
    uint64_t hilo = (uint64_t)((int64_t)get_HILO() +
                               (int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);

    set_HI_LOT0(t0, hilo);
    return (target_ulong)(int32_t)hilo;
}
168

    
169
/* VR54xx MACCHI: HI/LO += signed t0 * t1; the result is the new HI word.
   Fixed: set_HIT0_LO cannot return through by-value t0 (dead store),
   so the accumulated value is computed and its HI word returned here. */
target_ulong do_macchi (target_ulong t0, target_ulong t1)
{
    uint64_t hilo = (uint64_t)((int64_t)get_HILO() +
                               (int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);

    set_HIT0_LO(t0, hilo);
    return (target_ulong)(int32_t)(hilo >> 32);
}
175

    
176
/* VR54xx MACCU: HI/LO += unsigned t0 * t1; the result is the new LO word.
   Fixed: set_HI_LOT0 cannot return through by-value t0 (dead store),
   so the accumulated value is computed and its LO word returned here. */
target_ulong do_maccu (target_ulong t0, target_ulong t1)
{
    uint64_t hilo = (uint64_t)get_HILO() +
                    (uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1;

    set_HI_LOT0(t0, hilo);
    return (target_ulong)(int32_t)hilo;
}
182

    
183
/* VR54xx MACCHIU: HI/LO += unsigned t0 * t1; the result is the new HI
   word.  Fixed: set_HIT0_LO cannot return through by-value t0 (dead
   store), so the accumulated value's HI word is returned here. */
target_ulong do_macchiu (target_ulong t0, target_ulong t1)
{
    uint64_t hilo = (uint64_t)get_HILO() +
                    (uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1;

    set_HIT0_LO(t0, hilo);
    return (target_ulong)(int32_t)(hilo >> 32);
}
189

    
190
/* VR54xx MSAC: HI/LO -= signed t0 * t1; the result is the new LO word.
   Fixed: set_HI_LOT0 cannot return through by-value t0 (dead store),
   so the value is computed locally and its LO word returned here. */
target_ulong do_msac (target_ulong t0, target_ulong t1)
{
    uint64_t hilo = (uint64_t)((int64_t)get_HILO() -
                               (int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);

    set_HI_LOT0(t0, hilo);
    return (target_ulong)(int32_t)hilo;
}
196

    
197
/* VR54xx MSACHI: HI/LO -= signed t0 * t1; the result is the new HI word.
   Fixed: set_HIT0_LO cannot return through by-value t0 (dead store),
   so the value is computed locally and its HI word returned here. */
target_ulong do_msachi (target_ulong t0, target_ulong t1)
{
    uint64_t hilo = (uint64_t)((int64_t)get_HILO() -
                               (int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);

    set_HIT0_LO(t0, hilo);
    return (target_ulong)(int32_t)(hilo >> 32);
}
203

    
204
/* VR54xx MSACU: HI/LO -= unsigned t0 * t1; the result is the new LO word.
   Fixed: set_HI_LOT0 cannot return through by-value t0 (dead store),
   so the value is computed locally and its LO word returned here. */
target_ulong do_msacu (target_ulong t0, target_ulong t1)
{
    uint64_t hilo = (uint64_t)get_HILO() -
                    (uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1;

    set_HI_LOT0(t0, hilo);
    return (target_ulong)(int32_t)hilo;
}
210

    
211
/* VR54xx MSACHIU: HI/LO -= unsigned t0 * t1; the result is the new HI
   word.  Fixed: set_HIT0_LO cannot return through by-value t0 (dead
   store), so the value is computed locally and its HI word returned. */
target_ulong do_msachiu (target_ulong t0, target_ulong t1)
{
    uint64_t hilo = (uint64_t)get_HILO() -
                    (uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1;

    set_HIT0_LO(t0, hilo);
    return (target_ulong)(int32_t)(hilo >> 32);
}
217

    
218
/* VR54xx MULHI: HI/LO = signed t0 * t1; the result is the new HI word.
   Fixed: set_HIT0_LO cannot return through by-value t0 (dead store),
   so the product is computed locally and its HI word returned here. */
target_ulong do_mulhi (target_ulong t0, target_ulong t1)
{
    uint64_t hilo = (uint64_t)((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);

    set_HIT0_LO(t0, hilo);
    return (target_ulong)(int32_t)(hilo >> 32);
}
224

    
225
/* VR54xx MULHIU: HI/LO = unsigned t0 * t1; the result is the new HI word.
   Fixed: set_HIT0_LO cannot return through by-value t0 (dead store),
   so the product is computed locally and its HI word returned here. */
target_ulong do_mulhiu (target_ulong t0, target_ulong t1)
{
    uint64_t hilo = (uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1;

    set_HIT0_LO(t0, hilo);
    return (target_ulong)(int32_t)(hilo >> 32);
}
231

    
232
/* VR54xx MULSHI: HI/LO = -(signed t0 * t1); the result is the new HI
   word.  Fixed: set_HIT0_LO cannot return through by-value t0 (dead
   store), so the value is computed locally and its HI word returned. */
target_ulong do_mulshi (target_ulong t0, target_ulong t1)
{
    uint64_t hilo = 0 - (uint64_t)((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);

    set_HIT0_LO(t0, hilo);
    return (target_ulong)(int32_t)(hilo >> 32);
}
238

    
239
/* VR54xx MULSHIU: HI/LO = -(unsigned t0 * t1); the result is the new HI
   word.  Fixed: set_HIT0_LO cannot return through by-value t0 (dead
   store), so the value is computed locally and its HI word returned. */
target_ulong do_mulshiu (target_ulong t0, target_ulong t1)
{
    uint64_t hilo = 0 - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1);

    set_HIT0_LO(t0, hilo);
    return (target_ulong)(int32_t)(hilo >> 32);
}
245

    
246
#ifdef TARGET_MIPS64
247
/* DMULT: 64x64 signed multiply; 128-bit result split into LO (low) and
   HI (high) of the active TC. */
void do_dmult (target_ulong t0, target_ulong t1)
{
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), t0, t1);
}
251

    
252
/* DMULTU: 64x64 unsigned multiply; 128-bit result split into LO (low)
   and HI (high) of the active TC. */
void do_dmultu (target_ulong t0, target_ulong t1)
{
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), t0, t1);
}
256
#endif
257

    
258
#ifdef TARGET_WORDS_BIGENDIAN
259
#define GET_LMASK(v) ((v) & 3)
260
#define GET_OFFSET(addr, offset) (addr + (offset))
261
#else
262
#define GET_LMASK(v) (((v) & 3) ^ 3)
263
#define GET_OFFSET(addr, offset) (addr - (offset))
264
#endif
265

    
266
/* LWL: load word left.  Merge the 1..4 bytes from address t0 down to
   the next word boundary into the most-significant end of t1 and
   return the (sign-extended) merged word.  GET_LMASK yields the byte
   offset within the word adjusted for target endianness; mem_idx
   selects the kernel/supervisor/user address space (softmmu only). */
target_ulong do_lwl(target_ulong t0, target_ulong t1, int mem_idx)
{
    target_ulong tmp;

#ifdef CONFIG_USER_ONLY
#define ldfun ldub_raw
#else
    int (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldub_kernel; break;
    case 1: ldfun = ldub_super; break;
    default:
    case 2: ldfun = ldub_user; break;
    }
#endif
    /* The byte at t0 always participates and lands in bits 31..24. */
    tmp = ldfun(t0);
    t1 = (t1 & 0x00FFFFFF) | (tmp << 24);

    if (GET_LMASK(t0) <= 2) {
        tmp = ldfun(GET_OFFSET(t0, 1));
        t1 = (t1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(t0) <= 1) {
        tmp = ldfun(GET_OFFSET(t0, 2));
        t1 = (t1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(t0) == 0) {
        tmp = ldfun(GET_OFFSET(t0, 3));
        t1 = (t1 & 0xFFFFFF00) | tmp;
    }
    return (int32_t)t1;
}
302

    
303
/* LWR: load word right.  Merge the 1..4 bytes from address t0 up to the
   previous word boundary into the least-significant end of t1 and
   return the (sign-extended) merged word.  Mirror image of do_lwl. */
target_ulong do_lwr(target_ulong t0, target_ulong t1, int mem_idx)
{
    target_ulong tmp;

#ifdef CONFIG_USER_ONLY
#define ldfun ldub_raw
#else
    int (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldub_kernel; break;
    case 1: ldfun = ldub_super; break;
    default:
    case 2: ldfun = ldub_user; break;
    }
#endif
    /* The byte at t0 always participates and lands in bits 7..0. */
    tmp = ldfun(t0);
    t1 = (t1 & 0xFFFFFF00) | tmp;

    if (GET_LMASK(t0) >= 1) {
        tmp = ldfun(GET_OFFSET(t0, -1));
        t1 = (t1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(t0) >= 2) {
        tmp = ldfun(GET_OFFSET(t0, -2));
        t1 = (t1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(t0) == 3) {
        tmp = ldfun(GET_OFFSET(t0, -3));
        t1 = (t1 & 0x00FFFFFF) | (tmp << 24);
    }
    return (int32_t)t1;
}
339

    
340
/* SWL: store word left.  Write the most-significant 1..4 bytes of t1 to
   the bytes from address t0 down to the next word boundary.  GET_LMASK
   gives the endian-adjusted byte offset; mem_idx selects the address
   space (softmmu only). */
void do_swl(target_ulong t0, target_ulong t1, int mem_idx)
{
#ifdef CONFIG_USER_ONLY
#define stfun stb_raw
#else
    void (*stfun)(target_ulong, int);

    switch (mem_idx)
    {
    case 0: stfun = stb_kernel; break;
    case 1: stfun = stb_super; break;
    default:
    case 2: stfun = stb_user; break;
    }
#endif
    /* Byte at t0 always gets the top byte of t1. */
    stfun(t0, (uint8_t)(t1 >> 24));

    if (GET_LMASK(t0) <= 2)
        stfun(GET_OFFSET(t0, 1), (uint8_t)(t1 >> 16));

    if (GET_LMASK(t0) <= 1)
        stfun(GET_OFFSET(t0, 2), (uint8_t)(t1 >> 8));

    if (GET_LMASK(t0) == 0)
        stfun(GET_OFFSET(t0, 3), (uint8_t)t1);
}
366

    
367
/* SWR: store word right.  Write the least-significant 1..4 bytes of t1
   to the bytes from address t0 up to the previous word boundary.
   Mirror image of do_swl. */
void do_swr(target_ulong t0, target_ulong t1, int mem_idx)
{
#ifdef CONFIG_USER_ONLY
#define stfun stb_raw
#else
    void (*stfun)(target_ulong, int);

    switch (mem_idx)
    {
    case 0: stfun = stb_kernel; break;
    case 1: stfun = stb_super; break;
    default:
    case 2: stfun = stb_user; break;
    }
#endif
    /* Byte at t0 always gets the bottom byte of t1. */
    stfun(t0, (uint8_t)t1);

    if (GET_LMASK(t0) >= 1)
        stfun(GET_OFFSET(t0, -1), (uint8_t)(t1 >> 8));

    if (GET_LMASK(t0) >= 2)
        stfun(GET_OFFSET(t0, -2), (uint8_t)(t1 >> 16));

    if (GET_LMASK(t0) == 3)
        stfun(GET_OFFSET(t0, -3), (uint8_t)(t1 >> 24));
}
393

    
394
#if defined(TARGET_MIPS64)
395
/* "half" load and stores.  We must do the memory access inline,
396
   or fault handling won't work.  */
397

    
398
#ifdef TARGET_WORDS_BIGENDIAN
399
#define GET_LMASK64(v) ((v) & 7)
400
#else
401
#define GET_LMASK64(v) (((v) & 7) ^ 7)
402
#endif
403

    
404
/* LDL: load doubleword left (64-bit analogue of do_lwl).  Merge the
   1..8 bytes from address t0 down to the next doubleword boundary into
   the most-significant end of t1 and return the merged value.
   GET_LMASK64 gives the endian-adjusted byte offset within the
   doubleword; mem_idx selects the address space (softmmu only). */
target_ulong do_ldl(target_ulong t0, target_ulong t1, int mem_idx)
{
    uint64_t tmp;

#ifdef CONFIG_USER_ONLY
#define ldfun ldub_raw
#else
    int (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldub_kernel; break;
    case 1: ldfun = ldub_super; break;
    default:
    case 2: ldfun = ldub_user; break;
    }
#endif
    /* Byte at t0 always participates and lands in bits 63..56. */
    tmp = ldfun(t0);
    t1 = (t1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);

    if (GET_LMASK64(t0) <= 6) {
        tmp = ldfun(GET_OFFSET(t0, 1));
        t1 = (t1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(t0) <= 5) {
        tmp = ldfun(GET_OFFSET(t0, 2));
        t1 = (t1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(t0) <= 4) {
        tmp = ldfun(GET_OFFSET(t0, 3));
        t1 = (t1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(t0) <= 3) {
        tmp = ldfun(GET_OFFSET(t0, 4));
        t1 = (t1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(t0) <= 2) {
        tmp = ldfun(GET_OFFSET(t0, 5));
        t1 = (t1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(t0) <= 1) {
        tmp = ldfun(GET_OFFSET(t0, 6));
        t1 = (t1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    }

    if (GET_LMASK64(t0) == 0) {
        tmp = ldfun(GET_OFFSET(t0, 7));
        t1 = (t1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
    }

    return t1;
}
461

    
462
/* LDR: load doubleword right (64-bit analogue of do_lwr).  Merge the
   1..8 bytes from address t0 up to the previous doubleword boundary
   into the least-significant end of t1 and return the merged value. */
target_ulong do_ldr(target_ulong t0, target_ulong t1, int mem_idx)
{
    uint64_t tmp;

#ifdef CONFIG_USER_ONLY
#define ldfun ldub_raw
#else
    int (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldub_kernel; break;
    case 1: ldfun = ldub_super; break;
    default:
    case 2: ldfun = ldub_user; break;
    }
#endif
    /* Byte at t0 always participates and lands in bits 7..0. */
    tmp = ldfun(t0);
    t1 = (t1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;

    if (GET_LMASK64(t0) >= 1) {
        tmp = ldfun(GET_OFFSET(t0, -1));
        t1 = (t1 & 0xFFFFFFFFFFFF00FFULL) | (tmp  << 8);
    }

    if (GET_LMASK64(t0) >= 2) {
        tmp = ldfun(GET_OFFSET(t0, -2));
        t1 = (t1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(t0) >= 3) {
        tmp = ldfun(GET_OFFSET(t0, -3));
        t1 = (t1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(t0) >= 4) {
        tmp = ldfun(GET_OFFSET(t0, -4));
        t1 = (t1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(t0) >= 5) {
        tmp = ldfun(GET_OFFSET(t0, -5));
        t1 = (t1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(t0) >= 6) {
        tmp = ldfun(GET_OFFSET(t0, -6));
        t1 = (t1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(t0) == 7) {
        tmp = ldfun(GET_OFFSET(t0, -7));
        t1 = (t1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
    }

    return t1;
}
519

    
520
/* SDL: store doubleword left (64-bit analogue of do_swl).  Write the
   most-significant 1..8 bytes of t1 to the bytes from address t0 down
   to the next doubleword boundary. */
void do_sdl(target_ulong t0, target_ulong t1, int mem_idx)
{
#ifdef CONFIG_USER_ONLY
#define stfun stb_raw
#else
    void (*stfun)(target_ulong, int);

    switch (mem_idx)
    {
    case 0: stfun = stb_kernel; break;
    case 1: stfun = stb_super; break;
    default:
    case 2: stfun = stb_user; break;
    }
#endif
    /* Byte at t0 always gets the top byte of t1. */
    stfun(t0, (uint8_t)(t1 >> 56));

    if (GET_LMASK64(t0) <= 6)
        stfun(GET_OFFSET(t0, 1), (uint8_t)(t1 >> 48));

    if (GET_LMASK64(t0) <= 5)
        stfun(GET_OFFSET(t0, 2), (uint8_t)(t1 >> 40));

    if (GET_LMASK64(t0) <= 4)
        stfun(GET_OFFSET(t0, 3), (uint8_t)(t1 >> 32));

    if (GET_LMASK64(t0) <= 3)
        stfun(GET_OFFSET(t0, 4), (uint8_t)(t1 >> 24));

    if (GET_LMASK64(t0) <= 2)
        stfun(GET_OFFSET(t0, 5), (uint8_t)(t1 >> 16));

    if (GET_LMASK64(t0) <= 1)
        stfun(GET_OFFSET(t0, 6), (uint8_t)(t1 >> 8));

    /* <= 0 is equivalent to == 0 here (the mask is never negative). */
    if (GET_LMASK64(t0) <= 0)
        stfun(GET_OFFSET(t0, 7), (uint8_t)t1);
}
558

    
559
/* SDR: store doubleword right (64-bit analogue of do_swr).  Write the
   least-significant 1..8 bytes of t1 to the bytes from address t0 up
   to the previous doubleword boundary. */
void do_sdr(target_ulong t0, target_ulong t1, int mem_idx)
{
#ifdef CONFIG_USER_ONLY
#define stfun stb_raw
#else
    void (*stfun)(target_ulong, int);

    switch (mem_idx)
    {
    case 0: stfun = stb_kernel; break;
    case 1: stfun = stb_super; break;
     default:
    case 2: stfun = stb_user; break;
    }
#endif
    /* Byte at t0 always gets the bottom byte of t1. */
    stfun(t0, (uint8_t)t1);

    if (GET_LMASK64(t0) >= 1)
        stfun(GET_OFFSET(t0, -1), (uint8_t)(t1 >> 8));

    if (GET_LMASK64(t0) >= 2)
        stfun(GET_OFFSET(t0, -2), (uint8_t)(t1 >> 16));

    if (GET_LMASK64(t0) >= 3)
        stfun(GET_OFFSET(t0, -3), (uint8_t)(t1 >> 24));

    if (GET_LMASK64(t0) >= 4)
        stfun(GET_OFFSET(t0, -4), (uint8_t)(t1 >> 32));

    if (GET_LMASK64(t0) >= 5)
        stfun(GET_OFFSET(t0, -5), (uint8_t)(t1 >> 40));

    if (GET_LMASK64(t0) >= 6)
        stfun(GET_OFFSET(t0, -6), (uint8_t)(t1 >> 48));

    if (GET_LMASK64(t0) == 7)
        stfun(GET_OFFSET(t0, -7), (uint8_t)(t1 >> 56));
}
597
#endif /* TARGET_MIPS64 */
598

    
599
#ifdef CONFIG_USER_ONLY
600
/* User-mode stub: CP0 Random is a privileged resource — abort if reached. */
void do_mfc0_random (void)
{
    cpu_abort(env, "mfc0 random\n");
}
604

    
605
/* User-mode stub: CP0 Count is a privileged resource — abort if reached. */
void do_mfc0_count (void)
{
    cpu_abort(env, "mfc0 count\n");
}
609

    
610
/* User-mode stub: writing CP0 Count is not supported — abort if reached. */
void cpu_mips_store_count(CPUState *env, uint32_t value)
{
    cpu_abort(env, "mtc0 count\n");
}
614

    
615
/* User-mode stub: writing CP0 Compare is not supported — abort if reached. */
void cpu_mips_store_compare(CPUState *env, uint32_t value)
{
    cpu_abort(env, "mtc0 compare\n");
}
619

    
620
/* User-mode stub: the Count timer does not run in user mode — abort. */
void cpu_mips_start_count(CPUState *env)
{
    cpu_abort(env, "start count\n");
}
624

    
625
/* User-mode stub: the Count timer does not run in user mode — abort. */
void cpu_mips_stop_count(CPUState *env)
{
    cpu_abort(env, "stop count\n");
}
629

    
630
/* User-mode stub: interrupt state changes are not supported — abort. */
void cpu_mips_update_irq(CPUState *env)
{
    cpu_abort(env, "mtc0 status / mtc0 cause\n");
}
634

    
635
/* User-mode stub: Status write tracing is a system-mode facility — abort. */
void do_mtc0_status_debug(uint32_t old, uint32_t val)
{
    cpu_abort(env, "mtc0 status debug\n");
}
639

    
640
/* User-mode stub: IRQ-raise tracing is a system-mode facility — abort. */
void do_mtc0_status_irqraise_debug (void)
{
    cpu_abort(env, "mtc0 status irqraise debug\n");
}
644

    
645
/* User-mode stub: there is no TLB to flush in user mode — abort. */
void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    cpu_abort(env, "mips_tlb_flush\n");
}
649

    
650
#else
651

    
652
/* CP0 helpers */
653
/* mfc0: read the shared (per-MVP) MVPControl register. */
target_ulong do_mfc0_mvpcontrol (void)
{
    return env->mvp->CP0_MVPControl;
}
657

    
658
/* mfc0: read the shared (per-MVP) MVPConf0 register. */
target_ulong do_mfc0_mvpconf0 (void)
{
    return env->mvp->CP0_MVPConf0;
}
662

    
663
/* mfc0: read the shared (per-MVP) MVPConf1 register. */
target_ulong do_mfc0_mvpconf1 (void)
{
    return env->mvp->CP0_MVPConf1;
}
667

    
668
/* mfc0: read CP0 Random (a pseudo-random TLB index), sign-extended. */
target_ulong do_mfc0_random (void)
{
    return (int32_t)cpu_mips_get_random(env);
}
672

    
673
/* mfc0: read TCStatus of the currently running TC. */
target_ulong do_mfc0_tcstatus (void)
{
    return env->active_tc.CP0_TCStatus;
}
677

    
678
/* mftc0: read TCStatus of the TC selected by VPEControl.TargTC. */
target_ulong do_mftc0_tcstatus(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCStatus;
    else
        return env->tcs[other_tc].CP0_TCStatus;
}
687

    
688
/* mfc0: read TCBind of the currently running TC. */
target_ulong do_mfc0_tcbind (void)
{
    return env->active_tc.CP0_TCBind;
}
692

    
693
/* mftc0: read TCBind of the TC selected by VPEControl.TargTC. */
target_ulong do_mftc0_tcbind(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCBind;
    else
        return env->tcs[other_tc].CP0_TCBind;
}
702

    
703
/* mfc0: TCRestart is the restart PC of the currently running TC. */
target_ulong do_mfc0_tcrestart (void)
{
    return env->active_tc.PC;
}
707

    
708
/* mftc0: read TCRestart (restart PC) of the TC selected by TargTC. */
target_ulong do_mftc0_tcrestart(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.PC;
    else
        return env->tcs[other_tc].PC;
}
717

    
718
/* mfc0: read TCHalt of the currently running TC. */
target_ulong do_mfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}
722

    
723
/* mftc0: read TCHalt of the TC selected by VPEControl.TargTC. */
target_ulong do_mftc0_tchalt(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCHalt;
    else
        return env->tcs[other_tc].CP0_TCHalt;
}
732

    
733
/* mfc0: read TCContext of the currently running TC. */
target_ulong do_mfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}
737

    
738
/* mftc0: read TCContext of the TC selected by VPEControl.TargTC. */
target_ulong do_mftc0_tccontext(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCContext;
    else
        return env->tcs[other_tc].CP0_TCContext;
}
747

    
748
/* mfc0: read TCSchedule of the currently running TC. */
target_ulong do_mfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}
752

    
753
/* mftc0: read TCSchedule of the TC selected by VPEControl.TargTC. */
target_ulong do_mftc0_tcschedule(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCSchedule;
    else
        return env->tcs[other_tc].CP0_TCSchedule;
}
762

    
763
/* mfc0: read TCScheFBack of the currently running TC. */
target_ulong do_mfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}
767

    
768
/* mftc0: read TCScheFBack of the TC selected by VPEControl.TargTC. */
target_ulong do_mftc0_tcschefback(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCScheFBack;
    else
        return env->tcs[other_tc].CP0_TCScheFBack;
}
777

    
778
/* mfc0: read the free-running CP0 Count timer, sign-extended. */
target_ulong do_mfc0_count (void)
{
    return (int32_t)cpu_mips_get_count(env);
}
782

    
783
/* mftc0: read EntryHi with the ASID (low 8 bits) taken from the
   TCStatus of the TC selected by VPEControl.TargTC. */
target_ulong do_mftc0_entryhi(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;

    if (other_tc == env->current_tc)
        tcstatus = env->active_tc.CP0_TCStatus;
    else
        tcstatus = env->tcs[other_tc].CP0_TCStatus;

    return (env->CP0_EntryHi & ~0xff) | (tcstatus & 0xff);
}
795

    
796
/* mftc0: read Status, with the per-TC fields (TCU0..3, TMX, TKSU) taken
   from the TCStatus of the TC selected by VPEControl.TargTC and folded
   into the corresponding Status bit positions. */
target_ulong do_mftc0_status(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    target_ulong t0;
    int32_t tcstatus;

    if (other_tc == env->current_tc)
        tcstatus = env->active_tc.CP0_TCStatus;
    else
        tcstatus = env->tcs[other_tc].CP0_TCStatus;

    /* Clear the per-TC fields in Status, then merge them from TCStatus. */
    t0 = env->CP0_Status & ~0xf1000018;
    t0 |= tcstatus & (0xf << CP0TCSt_TCU0);
    t0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
    t0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);

    return t0;
}
814

    
815
/* mfc0: read LLAddr.  The architectural register holds the address
   shifted right by 4 (cast applies before the shift). */
target_ulong do_mfc0_lladdr (void)
{
    return (int32_t)env->CP0_LLAddr >> 4;
}
819

    
820
/* mfc0: read WatchLo[sel], sign-extended to target_ulong. */
target_ulong do_mfc0_watchlo (uint32_t sel)
{
    return (int32_t)env->CP0_WatchLo[sel];
}
824

    
825
/* mfc0: read WatchHi[sel]. */
target_ulong do_mfc0_watchhi (uint32_t sel)
{
    return env->CP0_WatchHi[sel];
}
829

    
830
/* mfc0: read Debug; the DM bit reflects the live debug-mode hflag. */
target_ulong do_mfc0_debug (void)
{
    target_ulong t0 = env->CP0_Debug;
    if (env->hflags & MIPS_HFLAG_DM)
        t0 |= 1 << CP0DB_DM;

    return t0;
}
838

    
839
/* mftc0: read Debug with the per-TC SSt/Halt bits taken from the
   Debug_tcstatus of the TC selected by VPEControl.TargTC. */
target_ulong do_mftc0_debug(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;

    if (other_tc == env->current_tc)
        tcstatus = env->active_tc.CP0_Debug_tcstatus;
    else
        tcstatus = env->tcs[other_tc].CP0_Debug_tcstatus;

    /* XXX: Might be wrong, check with EJTAG spec. */
    return (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
853

    
854
#if defined(TARGET_MIPS64)
855
/* dmfc0: 64-bit read of TCRestart (restart PC) of the running TC. */
target_ulong do_dmfc0_tcrestart (void)
{
    return env->active_tc.PC;
}
859

    
860
/* dmfc0: 64-bit read of TCHalt of the running TC. */
target_ulong do_dmfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}
864

    
865
/* dmfc0: 64-bit read of TCContext of the running TC. */
target_ulong do_dmfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}
869

    
870
/* dmfc0: 64-bit read of TCSchedule of the running TC. */
target_ulong do_dmfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}
874

    
875
/* dmfc0: 64-bit read of TCScheFBack of the running TC. */
target_ulong do_dmfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}
879

    
880
/* dmfc0: 64-bit read of LLAddr (address shifted right by 4). */
target_ulong do_dmfc0_lladdr (void)
{
    return env->CP0_LLAddr >> 4;
}
884

    
885
/* dmfc0: 64-bit read of WatchLo[sel] (no sign-extension). */
target_ulong do_dmfc0_watchlo (uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}
889
#endif /* TARGET_MIPS64 */
890

    
891
/* mtc0 Index: preserve the probe-failure bit (31) and mask the written
   index.  The loop rounds num up to a power of two larger than the TLB
   size, so (num - 1) masks the index to the implemented width. */
void do_mtc0_index (target_ulong t0)
{
    int num = 1;
    unsigned int tmp = env->tlb->nb_tlb;

    do {
        tmp >>= 1;
        num <<= 1;
    } while (tmp);
    env->CP0_Index = (env->CP0_Index & 0x80000000) | (t0 & (num - 1));
}
902

    
903
/* mtc0 MVPControl: update only the bits writable in the current mode.
   CPA/VPC/EVP require master-VP privilege (VPEConf0.MVP); STLB is only
   writable while VPC (configuration mode) is already set. */
void do_mtc0_mvpcontrol (target_ulong t0)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
                (1 << CP0MVPCo_EVP);
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0MVPCo_STLB);
    newval = (env->mvp->CP0_MVPControl & ~mask) | (t0 & mask);

    // TODO: Enable/disable shared TLB, enable/disable VPEs.

    env->mvp->CP0_MVPControl = newval;
}
919

    
920
/* mtc0 VPEControl: only YSI, GSI, TE and the TargTC field are writable. */
void do_mtc0_vpecontrol (target_ulong t0)
{
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (env->CP0_VPEControl & ~mask) | (t0 & mask);

    /* Yield scheduler intercept not implemented. */
    /* Gating storage scheduler intercept not implemented. */

    // TODO: Enable/disable TCs.

    env->CP0_VPEControl = newval;
}
936

    
937
/* mtc0 VPEConf0: writable only with master-VP privilege (MVP bit); the
   XTC field additionally requires VPA to be set. */
void do_mtc0_vpeconf0 (target_ulong t0)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
            mask |= (0xff << CP0VPEC0_XTC);
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    }
    newval = (env->CP0_VPEConf0 & ~mask) | (t0 & mask);

    // TODO: TC exclusive handling due to ERL/EXL.

    env->CP0_VPEConf0 = newval;
}
953

    
954
/* mtc0 VPEConf1: the NCX/NCP2/NCP1 fields are writable only while the
   MVP is in configuration mode (MVPControl.VPC set). */
void do_mtc0_vpeconf1 (target_ulong t0)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
                (0xff << CP0VPEC1_NCP1);
    newval = (env->CP0_VPEConf1 & ~mask) | (t0 & mask);

    /* UDI not implemented. */
    /* CP2 not implemented. */

    // TODO: Handle FPU (CP1) binding.

    env->CP0_VPEConf1 = newval;
}
971

    
972
/* mtc0 YQMask: yield qualifier inputs are not emulated, so the register
   reads back as zero regardless of the written value. */
void do_mtc0_yqmask (target_ulong t0)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}
977

    
978
/* mtc0 VPEOpt: only the low 16 bits are writable. */
void do_mtc0_vpeopt (target_ulong t0)
{
    env->CP0_VPEOpt = t0 & 0x0000ffff;
}
982

    
983
/* mtc0 EntryLo0: mask to the implemented PFN + attribute bits. */
void do_mtc0_entrylo0 (target_ulong t0)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo0 = t0 & 0x3FFFFFFF;
}
989

    
990
/* mtc0 TCStatus: update only the bits allowed by the per-CPU
   TCStatus read/write bitmask. */
void do_mtc0_tcstatus (target_ulong t0)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->active_tc.CP0_TCStatus & ~mask) | (t0 & mask);

    // TODO: Sync with CP0_Status.

    env->active_tc.CP0_TCStatus = newval;
}
1001

    
1002
/* mttc0: write TCStatus of the TC selected by VPEControl.TargTC.
   NOTE(review): unlike do_mtc0_tcstatus, no rw bitmask is applied here. */
void do_mttc0_tcstatus (target_ulong t0)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    // TODO: Sync with CP0_Status.

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCStatus = t0;
    else
        env->tcs[other_tc].CP0_TCStatus = t0;
}
1013

    
1014
/* mtc0 TCBind: TBE is always writable; CurVPE only while the MVP is in
   configuration mode (MVPControl.VPC set). */
void do_mtc0_tcbind (target_ulong t0)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    newval = (env->active_tc.CP0_TCBind & ~mask) | (t0 & mask);
    env->active_tc.CP0_TCBind = newval;
}
1024

    
1025
/* mttc0: write TCBind of the TC selected by VPEControl.TargTC, with the
   same writable-bit rules as do_mtc0_tcbind. */
void do_mttc0_tcbind (target_ulong t0)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    if (other_tc == env->current_tc) {
        newval = (env->active_tc.CP0_TCBind & ~mask) | (t0 & mask);
        env->active_tc.CP0_TCBind = newval;
    } else {
        newval = (env->tcs[other_tc].CP0_TCBind & ~mask) | (t0 & mask);
        env->tcs[other_tc].CP0_TCBind = newval;
    }
}
1041

    
1042
/* mtc0 TCRestart: set the restart PC of the running TC, clear its
   delay-slot flag (TDS) and drop any pending LL/SC reservation. */
void do_mtc0_tcrestart (target_ulong t0)
{
    env->active_tc.PC = t0;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->CP0_LLAddr = 0ULL;
    /* MIPS16 not implemented. */
}
1049

    
1050
/* mttc0: set the restart PC of the TC selected by VPEControl.TargTC,
   clearing its TDS flag and the LL/SC reservation (see do_mtc0_tcrestart). */
void do_mttc0_tcrestart (target_ulong t0)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc) {
        env->active_tc.PC = t0;
        env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        env->CP0_LLAddr = 0ULL;
        /* MIPS16 not implemented. */
    } else {
        env->tcs[other_tc].PC = t0;
        env->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        env->CP0_LLAddr = 0ULL;
        /* MIPS16 not implemented. */
    }
}
1066

    
1067
/* mtc0 TCHalt: only the H bit (bit 0) is writable. */
void do_mtc0_tchalt (target_ulong t0)
{
    env->active_tc.CP0_TCHalt = t0 & 0x1;

    // TODO: Halt TC / Restart (if allocated+active) TC.
}
1073

    
1074
/* mttc0: write TCHalt of the TC selected by VPEControl.TargTC.
   NOTE(review): unlike do_mtc0_tchalt, the value is not masked to bit 0. */
void do_mttc0_tchalt (target_ulong t0)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    // TODO: Halt TC / Restart (if allocated+active) TC.

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCHalt = t0;
    else
        env->tcs[other_tc].CP0_TCHalt = t0;
}
1085

    
1086
/* mtc0: write TCContext of the currently running TC. */
void do_mtc0_tccontext (target_ulong t0)
{
    env->active_tc.CP0_TCContext = t0;
}
1090

    
1091
/* mttc0: write TCContext of the TC selected by VPEControl.TargTC. */
void do_mttc0_tccontext (target_ulong t0)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCContext = t0;
    else
        env->tcs[other_tc].CP0_TCContext = t0;
}
1100

    
1101
/* mtc0: write TCSchedule of the currently running TC. */
void do_mtc0_tcschedule (target_ulong t0)
{
    env->active_tc.CP0_TCSchedule = t0;
}
1105

    
1106
/* mttc0: write TCSchedule of the TC selected by VPEControl.TargTC. */
void do_mttc0_tcschedule (target_ulong t0)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCSchedule = t0;
    else
        env->tcs[other_tc].CP0_TCSchedule = t0;
}
1115

    
1116
/* mtc0: write TCScheFBack of the currently running TC. */
void do_mtc0_tcschefback (target_ulong t0)
{
    env->active_tc.CP0_TCScheFBack = t0;
}
1120

    
1121
/* MTTC0 to TCScheFBack of the TC selected by VPEControl.TargTC. */
void do_mttc0_tcschefback (target_ulong t0)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (tc != env->current_tc)
        env->tcs[tc].CP0_TCScheFBack = t0;
    else
        env->active_tc.CP0_TCScheFBack = t0;
}
1130

    
1131
/* MTC0 to CP0 EntryLo1: keep only the PFN/C/D/V/G fields. */
void do_mtc0_entrylo1 (target_ulong t0)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo1 = t0 & 0x3FFFFFFF;
}
1137

    
1138
/* MTC0 to CP0 Context: only the PTEBase field (bits above bit 22) is
   software-writable; the BadVPN2 field is preserved. */
void do_mtc0_context (target_ulong t0)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (t0 & ~0x007FFFFF);
}
1142

    
1143
/* MTC0 to CP0 PageMask: restrict to the implemented mask bits. */
void do_mtc0_pagemask (target_ulong t0)
{
    /* 1k pages not implemented */
    env->CP0_PageMask = t0 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
}
1148

    
1149
/* MTC0 to CP0 PageGrain: all optional features are unimplemented, so
   the register reads back as zero regardless of the written value. */
void do_mtc0_pagegrain (target_ulong t0)
{
    /* SmartMIPS not implemented */
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = 0;
}
1156

    
1157
/* MTC0 to CP0 Wired: clamp to a valid TLB index via modulo. */
void do_mtc0_wired (target_ulong t0)
{
    env->CP0_Wired = t0 % env->tlb->nb_tlb;
}
1161

    
1162
/* MTC0 to SRSConf0: set-only through the per-CPU writable-bit mask. */
void do_mtc0_srsconf0 (target_ulong t0)
{
    env->CP0_SRSConf0 |= t0 & env->CP0_SRSConf0_rw_bitmask;
}
1166

    
1167
/* MTC0 to SRSConf1: set-only through the per-CPU writable-bit mask. */
void do_mtc0_srsconf1 (target_ulong t0)
{
    env->CP0_SRSConf1 |= t0 & env->CP0_SRSConf1_rw_bitmask;
}
1171

    
1172
/* MTC0 to SRSConf2: set-only through the per-CPU writable-bit mask. */
void do_mtc0_srsconf2 (target_ulong t0)
{
    env->CP0_SRSConf2 |= t0 & env->CP0_SRSConf2_rw_bitmask;
}
1176

    
1177
/* MTC0 to SRSConf3: set-only through the per-CPU writable-bit mask. */
void do_mtc0_srsconf3 (target_ulong t0)
{
    env->CP0_SRSConf3 |= t0 & env->CP0_SRSConf3_rw_bitmask;
}
1181

    
1182
/* MTC0 to SRSConf4: set-only through the per-CPU writable-bit mask. */
void do_mtc0_srsconf4 (target_ulong t0)
{
    env->CP0_SRSConf4 |= t0 & env->CP0_SRSConf4_rw_bitmask;
}
1186

    
1187
/* MTC0 to CP0 HWREna: only the four standard RDHWR-enable bits
   (CPUNum, SYNCI_Step, CC, CCRes) are writable. */
void do_mtc0_hwrena (target_ulong t0)
{
    env->CP0_HWREna = t0 & 0x0000000F;
}
1191

    
1192
/* MTC0 to CP0 Count: delegate to the timer code, which rebases the
   free-running counter. */
void do_mtc0_count (target_ulong t0)
{
    cpu_mips_store_count(env, t0);
}
1196

    
1197
/* MTC0 to CP0 EntryHi (VPN2 + ASID).
   Mirrors the new ASID into the current TC's TCStatus when the MT ASE
   is present, and flushes qemu's TLB if the ASID actually changed
   (shadow mappings for the old ASID would otherwise stay live). */
void do_mtc0_entryhi (target_ulong t0)
{
    target_ulong old, val;

    /* 1k pages not implemented */
    val = t0 & ((TARGET_PAGE_MASK << 1) | 0xFF);
#if defined(TARGET_MIPS64)
    val &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        /* Keep TCStatus.TASID in sync with EntryHi.ASID. */
        uint32_t tcst = env->active_tc.CP0_TCStatus & ~0xff;
        env->active_tc.CP0_TCStatus = tcst | (val & 0xff);
    }
    /* If the ASID changes, flush qemu's TLB.  */
    if ((old & 0xFF) != (val & 0xFF))
        cpu_mips_tlb_flush(env, 1);
}
1216

    
1217
/* MTTC0 to EntryHi: update the VPN2 part of EntryHi (the live ASID is
   preserved) and store the new ASID into the targeted TC's TCStatus. */
void do_mttc0_entryhi(target_ulong t0)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t status;

    env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (t0 & ~0xff);
    if (tc != env->current_tc) {
        status = (env->tcs[tc].CP0_TCStatus & ~0xff) | (t0 & 0xff);
        env->tcs[tc].CP0_TCStatus = status;
    } else {
        status = (env->active_tc.CP0_TCStatus & ~0xff) | (t0 & 0xff);
        env->active_tc.CP0_TCStatus = status;
    }
}
1231

    
1232
/* MTC0 to CP0 Compare: delegate to the timer code, which also clears
   the pending timer interrupt. */
void do_mtc0_compare (target_ulong t0)
{
    cpu_mips_store_compare(env, t0);
}
1236

    
1237
/* MTC0 to CP0 Status: apply only the bits writable on this CPU model,
   recompute the cached hflags (mode, FPU enables, ...) and re-evaluate
   pending interrupts, since IE/IM/EXL may have changed. */
void do_mtc0_status (target_ulong t0)
{
    uint32_t val, old;
    uint32_t mask = env->CP0_Status_rw_bitmask;

    val = t0 & mask;
    old = env->CP0_Status;
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
    compute_hflags(env);
    if (loglevel & CPU_LOG_EXEC)
        do_mtc0_status_debug(old, val);
    cpu_mips_update_irq(env);
}
1250

    
1251
/* MTTC0 to Status: write Status (minus the read-only bits) and fold
   the per-thread fields (CU0-3, MX, KSU) into the targeted TC's
   TCStatus at their shifted positions. */
void do_mttc0_status(target_ulong t0)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus = env->tcs[other_tc].CP0_TCStatus;

    env->CP0_Status = t0 & ~0xf1000018;
    /* Status.CU0-3 -> TCStatus.TCU0-3 (same bit positions). */
    tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (t0 & (0xf << CP0St_CU0));
    /* Status.MX -> TCStatus.TMX. */
    tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((t0 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
    /* Status.KSU -> TCStatus.TKSU. */
    tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((t0 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCStatus = tcstatus;
    else
        env->tcs[other_tc].CP0_TCStatus = tcstatus;
}
1265

    
1266
/* MTC0 to CP0 IntCtl: only the VS field bits kept here are writable. */
void do_mtc0_intctl (target_ulong t0)
{
    /* vectored interrupts not implemented, no performance counters. */
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (t0 & 0x000002e0);
}
1271

    
1272
/* MTC0 to CP0 SRSCtl: only the ESS and PSS shadow-set fields are
   software-writable. */
void do_mtc0_srsctl (target_ulong t0)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (t0 & mask);
}
1277

    
1278
/* MTC0 to CP0 Cause: writable bits are IV, WP and the software IP
   bits (plus DC on R2 cores).  Toggling DC starts/stops the Count
   timer; writing the software-interrupt bits re-evaluates IRQs. */
void do_mtc0_cause (target_ulong t0)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = env->CP0_Cause;

    if (env->insn_flags & ISA_MIPS32R2)
        mask |= 1 << CP0Ca_DC;

    env->CP0_Cause = (env->CP0_Cause & ~mask) | (t0 & mask);

    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
        /* DC set stops the counter, DC cleared restarts it. */
        if (env->CP0_Cause & (1 << CP0Ca_DC))
            cpu_mips_stop_count(env);
        else
            cpu_mips_start_count(env);
    }

    /* Handle the software interrupt as an hardware one, as they
       are very similar */
    if (t0 & CP0Ca_IP_mask) {
        cpu_mips_update_irq(env);
    }
}
1301

    
1302
/* MTC0 to CP0 EBase: keep the exception base in kseg0 (top bit forced)
   and accept only the writable base-address field. */
void do_mtc0_ebase (target_ulong t0)
{
    /* vectored interrupts not implemented */
    /* Multi-CPU not implemented */
    env->CP0_EBase = 0x80000000 | (t0 & 0x3FFFF000);
}
1308

    
1309
/* MTC0 to CP0 Config0: only the K0 cacheability field is writable. */
void do_mtc0_config0 (target_ulong t0)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (t0 & 0x00000007);
}
1313

    
1314
/* MTC0 to CP0 Config2: no cache-control bits are implemented, so the
   written value (t0) is deliberately ignored and the read-only bits
   are simply kept. */
void do_mtc0_config2 (target_ulong t0)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}
1319

    
1320
/* MTC0 to CP0 WatchLo[sel]: store the watch address; the low three
   I/R/W enable bits are cleared (watch exceptions unimplemented). */
void do_mtc0_watchlo (target_ulong t0, uint32_t sel)
{
    /* Watch exceptions for instructions, data loads, data stores
       not implemented. */
    env->CP0_WatchLo[sel] = (t0 & ~0x7);
}
1326

    
1327
/* MTC0 to CP0 WatchHi[sel]: store the writable fields, then treat the
   low three status bits as write-one-to-clear. */
void do_mtc0_watchhi (target_ulong t0, uint32_t sel)
{
    env->CP0_WatchHi[sel] = (t0 & 0x40FF0FF8);
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & t0 & 0x7);
}
1332

    
1333
/* MTC0 to CP0 XContext (MIPS64): only the PTEBase part above the
   BadVPN2/R fields is writable; the mask width depends on SEGBITS. */
void do_mtc0_xcontext (target_ulong t0)
{
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
    env->CP0_XContext = (env->CP0_XContext & mask) | (t0 & ~mask);
}
1338

    
1339
/* MTC0 to CP0 Framemask: stored verbatim, no masking implemented. */
void do_mtc0_framemask (target_ulong t0)
{
    env->CP0_Framemask = t0; /* XXX */
}
1343

    
1344
/* MTC0 to CP0 Debug: merge the writable bits and keep the cached
   debug-mode hflag in sync with Debug.DM. */
void do_mtc0_debug (target_ulong t0)
{
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (t0 & 0x13300120);
    if (t0 & (1 << CP0DB_DM))
        env->hflags |= MIPS_HFLAG_DM;
    else
        env->hflags &= ~MIPS_HFLAG_DM;
}
1352

    
1353
/* MTTC0 to Debug: route the per-TC SSt/Halt bits to the targeted TC's
   Debug_tcstatus, the remaining bits to the shared Debug register. */
void do_mttc0_debug(target_ulong t0)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = t0 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (other_tc == env->current_tc)
        env->active_tc.CP0_Debug_tcstatus = val;
    else
        env->tcs[other_tc].CP0_Debug_tcstatus = val;
    env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                     (t0 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
1366

    
1367
/* MTC0 to CP0 Performance0: only the low control bits are kept. */
void do_mtc0_performance0 (target_ulong t0)
{
    env->CP0_Performance0 = t0 & 0x000007ff;
}
1371

    
1372
/* MTC0 to CP0 TagLo: mask off the bits that are not implemented. */
void do_mtc0_taglo (target_ulong t0)
{
    env->CP0_TagLo = t0 & 0xFFFFFCF6;
}
1376

    
1377
/* MTC0 to CP0 DataLo: stored verbatim, no masking implemented. */
void do_mtc0_datalo (target_ulong t0)
{
    env->CP0_DataLo = t0; /* XXX */
}
1381

    
1382
/* MTC0 to CP0 TagHi: stored verbatim, no masking implemented. */
void do_mtc0_taghi (target_ulong t0)
{
    env->CP0_TagHi = t0; /* XXX */
}
1386

    
1387
/* MTC0 to CP0 DataHi: stored verbatim, no masking implemented. */
void do_mtc0_datahi (target_ulong t0)
{
    env->CP0_DataHi = t0; /* XXX */
}
1391

    
1392
/* Debug trace for Status writes: log old/new Status (with the IP bits
   currently enabled against Cause) and the resulting CPU mode. */
void do_mtc0_status_debug(uint32_t old, uint32_t val)
{
    fprintf(logfile, "Status %08x (%08x) => %08x (%08x) Cause %08x",
            old, old & env->CP0_Cause & CP0Ca_IP_mask,
            val, val & env->CP0_Cause & CP0Ca_IP_mask,
            env->CP0_Cause);
    switch (env->hflags & MIPS_HFLAG_KSU) {
    case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
    case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
    case MIPS_HFLAG_KM: fputs("\n", logfile); break;
    default: cpu_abort(env, "Invalid MMU mode!\n"); break;
    }
}
1405

    
1406
/* Debug trace emitted when a Status write unmasks pending IRQs. */
void do_mtc0_status_irqraise_debug(void)
{
    fprintf(logfile, "Raise pending IRQs\n");
}
1410
#endif /* !CONFIG_USER_ONLY */
1411

    
1412
/* MIPS MT functions */
1413
/* MFTR: read GPR <sel> of the TC selected by VPEControl.TargTC. */
target_ulong do_mftgpr(target_ulong t0, uint32_t sel)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (tc != env->current_tc)
        return env->tcs[tc].gpr[sel];
    return env->active_tc.gpr[sel];
}
1422

    
1423
/* MFTR: read LO[sel] of the TC selected by VPEControl.TargTC. */
target_ulong do_mftlo(target_ulong t0, uint32_t sel)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (tc != env->current_tc)
        return env->tcs[tc].LO[sel];
    return env->active_tc.LO[sel];
}
1432

    
1433
/* MFTR: read HI[sel] of the TC selected by VPEControl.TargTC. */
target_ulong do_mfthi(target_ulong t0, uint32_t sel)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (tc != env->current_tc)
        return env->tcs[tc].HI[sel];
    return env->active_tc.HI[sel];
}
1442

    
1443
/* MFTR: read ACX[sel] of the TC selected by VPEControl.TargTC. */
target_ulong do_mftacx(target_ulong t0, uint32_t sel)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (tc != env->current_tc)
        return env->tcs[tc].ACX[sel];
    return env->active_tc.ACX[sel];
}
1452

    
1453
/* MFTR: read DSPControl of the TC selected by VPEControl.TargTC. */
target_ulong do_mftdsp(target_ulong t0)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (tc != env->current_tc)
        return env->tcs[tc].DSPControl;
    return env->active_tc.DSPControl;
}
1462

    
1463
/* MTTR: write GPR <sel> of the TC selected by VPEControl.TargTC. */
void do_mttgpr(target_ulong t0, uint32_t sel)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (tc != env->current_tc)
        env->tcs[tc].gpr[sel] = t0;
    else
        env->active_tc.gpr[sel] = t0;
}
1472

    
1473
/* MTTR: write LO[sel] of the TC selected by VPEControl.TargTC. */
void do_mttlo(target_ulong t0, uint32_t sel)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (tc != env->current_tc)
        env->tcs[tc].LO[sel] = t0;
    else
        env->active_tc.LO[sel] = t0;
}
1482

    
1483
/* MTTR: write HI[sel] of the TC selected by VPEControl.TargTC. */
void do_mtthi(target_ulong t0, uint32_t sel)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (tc != env->current_tc)
        env->tcs[tc].HI[sel] = t0;
    else
        env->active_tc.HI[sel] = t0;
}
1492

    
1493
/* MTTR: write ACX[sel] of the TC selected by VPEControl.TargTC. */
void do_mttacx(target_ulong t0, uint32_t sel)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (tc != env->current_tc)
        env->tcs[tc].ACX[sel] = t0;
    else
        env->active_tc.ACX[sel] = t0;
}
1502

    
1503
/* MTTR: write DSPControl of the TC selected by VPEControl.TargTC. */
void do_mttdsp(target_ulong t0)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (tc != env->current_tc)
        env->tcs[tc].DSPControl = t0;
    else
        env->active_tc.DSPControl = t0;
}
1512

    
1513
/* MIPS MT functions */
1514
/* DMT (disable multi-threading): stub — always returns 0 as the
   previous VPEControl.TE state; no TCs are actually stopped yet. */
target_ulong do_dmt(target_ulong t0)
{
    // TODO
    t0 = 0;
    // rt = t0

    return t0;
}
1522

    
1523
/* EMT (enable multi-threading): stub — always returns 0 as the
   previous state; no TCs are actually restarted yet. */
target_ulong do_emt(target_ulong t0)
{
    // TODO
    t0 = 0;
    // rt = t0

    return t0;
}
1531

    
1532
/* DVPE (disable virtual processors): stub — always returns 0. */
target_ulong do_dvpe(target_ulong t0)
{
    // TODO
    t0 = 0;
    // rt = t0

    return t0;
}
1540

    
1541
/* EVPE (enable virtual processors): stub — always returns 0. */
target_ulong do_evpe(target_ulong t0)
{
    // TODO
    t0 = 0;
    // rt = t0

    return t0;
}
1549

    
1550
/* FORK: stub — allocating a new TC and seeding it with t1 is not
   implemented; the write below has no observable effect. */
void do_fork(target_ulong t0, target_ulong t1)
{
    // t0 = rt, t1 = rs
    t0 = 0;
    // TODO: store to TC register
}
1556

    
1557
/* YIELD: yield/block the current thread context.
 *
 * t0 is the rs operand: negative values request event-blocking
 * behaviour (with -2 exempt from the scheduler-intercept exception),
 * 0 deallocates the TC, positive values wait on a qualifier mask.
 * Returns the YQMask register.
 *
 * Bug fixed: t0 is target_ulong (unsigned), so the original
 * "t0 < 0" test was always false and the whole negative-argument
 * path (including the YSI thread exception) was dead code.  All sign
 * comparisons now use the signed interpretation of t0.
 */
target_ulong do_yield(target_ulong t0)
{
    if ((int32_t)t0 < 0) {
        /* No scheduling policy implemented. */
        if ((int32_t)t0 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                /* Signal a Thread exception with the YIELD cause code. */
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                do_raise_exception(EXCP_THREAD);
            }
        }
    } else if (t0 == 0) {
        if (0 /* TODO: TC underflow */) {
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            do_raise_exception(EXCP_THREAD);
        } else {
            // TODO: Deallocate TC
        }
    } else {
        /* (int32_t)t0 > 0: yield on a qualifier mask.
           Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        do_raise_exception(EXCP_THREAD);
    }
    return env->CP0_YQMask;
}
1584

    
1585
/* CP1 functions */
1586
/* Translate softfloat exception flags into the FCR31 Flags and Cause
   fields.  Cause only records conditions whose Enable bit is set; the
   sticky Flags field records every condition that occurred. */
void fpu_handle_exception(void)
{
#ifdef CONFIG_SOFTFLOAT
    int flags = get_float_exception_flags(&env->fpu->fp_status);
    unsigned int cpuflags = 0, enable, cause = 0;

    enable = GET_FP_ENABLE(env->fpu->fcr31);

    /* determine current flags */
    if (flags & float_flag_invalid) {
        cpuflags |= FP_INVALID;
        cause |= FP_INVALID & enable;
    }
    if (flags & float_flag_divbyzero) {
        cpuflags |= FP_DIV0;
        cause |= FP_DIV0 & enable;
    }
    if (flags & float_flag_overflow) {
        cpuflags |= FP_OVERFLOW;
        cause |= FP_OVERFLOW & enable;
    }
    if (flags & float_flag_underflow) {
        cpuflags |= FP_UNDERFLOW;
        cause |= FP_UNDERFLOW & enable;
    }
    if (flags & float_flag_inexact) {
        cpuflags |= FP_INEXACT;
        cause |= FP_INEXACT & enable;
    }
    SET_FP_FLAGS(env->fpu->fcr31, cpuflags);
    SET_FP_CAUSE(env->fpu->fcr31, cause);
#else
    /* Host FPU in use: no exception information is available. */
    SET_FP_FLAGS(env->fpu->fcr31, 0);
    SET_FP_CAUSE(env->fpu->fcr31, 0);
#endif
}
1622

    
1623
#ifndef CONFIG_USER_ONLY
1624
/* TLB management */
1625
/* Flush qemu's softmmu TLB and forget all shadow TLB entries (entries
   beyond nb_tlb that cached previously-replaced guest mappings). */
void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush (env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}
1631

    
1632
/* Invalidate shadow TLB entries from index <first> up to tlb_in_use. */
static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards.  */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}
1639

    
1640
/* Fill guest TLB entry <idx> from the CP0 EntryHi/PageMask/EntryLo0/
   EntryLo1 registers, decoding the packed EntryLo fields. */
static void r4k_fill_tlb (int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    /* Entry is global only if both halves have G set. */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}
1662

    
1663
/* TLBWI: write the CP0 TLB registers into the entry selected by Index
   (modulo the TLB size, guarding against out-of-range Index values). */
void r4k_do_tlbwi (void)
{
    /* Discard cached TLB entries.  We could avoid doing this if the
       tlbwi is just upgrading access permissions on the current entry;
       that might be a further win.  */
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);

    r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb, 0);
    r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);
}
1673

    
1674
/* TLBWR: write the CP0 TLB registers into a pseudo-random non-wired
   entry; the replaced mapping is kept as a shadow entry (arg 1). */
void r4k_do_tlbwr (void)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(r);
}
1681

    
1682
/* TLBP: probe the TLB for an entry matching EntryHi (ASID unless the
   entry is global, plus VPN under the entry's page mask).  On a hit,
   Index gets the entry number; on a miss the P bit (bit 31) is set
   and any matching shadow entries are discarded so they cannot serve
   translations for a mapping the guest believes absent. */
void r4k_do_tlbp (void)
{
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint8_t ASID;
    int i;

    ASID = env->CP0_EntryHi & 0xFF;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match.  */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
            /* Check ASID, virtual page number & size */
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
                r4k_mips_tlb_flush_extra (env, i);
                break;
            }
        }

        /* Set the probe-failure bit. */
        env->CP0_Index |= 0x80000000;
    }
}
1723

    
1724
/* TLBR: read the TLB entry selected by Index back into the CP0
   EntryHi/PageMask/EntryLo0/EntryLo1 registers, re-packing fields. */
void r4k_do_tlbr (void)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;

    ASID = env->CP0_EntryHi & 0xFF;
    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];

    /* If this will change the current ASID, flush qemu's TLB.  */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}
1745

    
1746
#endif /* !CONFIG_USER_ONLY */
1747

    
1748
/* Specials */
1749
target_ulong do_di (void)
1750
{
1751
    target_ulong t0 = env->CP0_Status;
1752

    
1753
    env->CP0_Status = t0 & ~(1 << CP0St_IE);
1754
    cpu_mips_update_irq(env);
1755

    
1756
    return t0;
1757
}
1758

    
1759
target_ulong do_ei (void)
1760
{
1761
    target_ulong t0 = env->CP0_Status;
1762

    
1763
    env->CP0_Status = t0 | (1 << CP0St_IE);
1764
    cpu_mips_update_irq(env);
1765

    
1766
    return t0;
1767
}
1768

    
1769
/* Log CPU state before an ERET/DERET (PC, EPC, plus ErrorEPC/DEPC
   when the error/debug level is active). */
void debug_pre_eret (void)
{
    fprintf(logfile, "ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
            env->active_tc.PC, env->CP0_EPC);
    if (env->CP0_Status & (1 << CP0St_ERL))
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
    if (env->hflags & MIPS_HFLAG_DM)
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
    fputs("\n", logfile);
}
1779

    
1780
/* Log CPU state after an ERET/DERET, including the resulting CPU mode
   decoded from the cached hflags. */
void debug_post_eret (void)
{
    fprintf(logfile, "  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
            env->active_tc.PC, env->CP0_EPC);
    if (env->CP0_Status & (1 << CP0St_ERL))
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
    if (env->hflags & MIPS_HFLAG_DM)
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
    switch (env->hflags & MIPS_HFLAG_KSU) {
    case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
    case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
    case MIPS_HFLAG_KM: fputs("\n", logfile); break;
    default: cpu_abort(env, "Invalid MMU mode!\n"); break;
    }
}
1795

    
1796
/* ERET: return from exception.  ERL takes priority over EXL: with ERL
   set, jump to ErrorEPC and clear ERL; otherwise jump to EPC and
   clear EXL.  Recomputes hflags and clears the LL bit (writing a
   non-matching LLAddr value breaks any pending LL/SC sequence). */
void do_eret (void)
{
    if (loglevel & CPU_LOG_EXEC)
        debug_pre_eret();
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        env->active_tc.PC = env->CP0_ErrorEPC;
        env->CP0_Status &= ~(1 << CP0St_ERL);
    } else {
        env->active_tc.PC = env->CP0_EPC;
        env->CP0_Status &= ~(1 << CP0St_EXL);
    }
    compute_hflags(env);
    if (loglevel & CPU_LOG_EXEC)
        debug_post_eret();
    env->CP0_LLAddr = 1;
}
1812

    
1813
void do_deret (void)
1814
{
1815
    if (loglevel & CPU_LOG_EXEC)
1816
        debug_pre_eret();
1817
    env->active_tc.PC = env->CP0_DEPC;
1818
    env->hflags &= MIPS_HFLAG_DM;
1819
    compute_hflags(env);
1820
    if (loglevel & CPU_LOG_EXEC)
1821
        debug_post_eret();
1822
    env->CP0_LLAddr = 1;
1823
}
1824

    
1825
/* RDHWR 0 (CPUNum): readable in kernel mode or when HWREna bit 0 is
   set; otherwise raise Reserved Instruction. */
target_ulong do_rdhwr_cpunum(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 0)))
        return env->CP0_EBase & 0x3ff;
    else
        do_raise_exception(EXCP_RI);

    /* Not reached: do_raise_exception does not return. */
    return 0;
}
1835

    
1836
/* RDHWR 1 (SYNCI_Step): readable in kernel mode or when HWREna bit 1
   is set; otherwise raise Reserved Instruction. */
target_ulong do_rdhwr_synci_step(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 1)))
        return env->SYNCI_Step;
    else
        do_raise_exception(EXCP_RI);

    /* Not reached: do_raise_exception does not return. */
    return 0;
}
1846

    
1847
/* RDHWR 2 (CC, cycle counter): readable in kernel mode or when HWREna
   bit 2 is set; otherwise raise Reserved Instruction. */
target_ulong do_rdhwr_cc(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 2)))
        return env->CP0_Count;
    else
        do_raise_exception(EXCP_RI);

    /* Not reached: do_raise_exception does not return. */
    return 0;
}
1857

    
1858
/* RDHWR 3 (CCRes, counter resolution): readable in kernel mode or
   when HWREna bit 3 is set; otherwise raise Reserved Instruction. */
target_ulong do_rdhwr_ccres(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 3)))
        return env->CCRes;
    else
        do_raise_exception(EXCP_RI);

    /* Not reached: do_raise_exception does not return. */
    return 0;
}
1868

    
1869
/* Bitfield operations. */
1870
/* EXT: extract a <size>-bit field of t1 starting at bit <pos>, then
   sign-extend the 32-bit result to target_ulong. */
target_ulong do_ext(target_ulong t1, uint32_t pos, uint32_t size)
{
    uint32_t field_mask = (size < 32) ? ((1 << size) - 1) : ~0;

    return (int32_t)((t1 >> pos) & field_mask);
}
1874

    
1875
/* INS: insert the low <size> bits of t1 into t0 at bit <pos>, then
   sign-extend the 32-bit result to target_ulong. */
target_ulong do_ins(target_ulong t0, target_ulong t1, uint32_t pos, uint32_t size)
{
    uint32_t low_bits = (size < 32) ? ((1 << size) - 1) : ~0;
    target_ulong mask = (target_ulong)low_bits << pos;
    target_ulong merged = (t0 & ~mask) | ((t1 << pos) & mask);

    return (int32_t)merged;
}
1881

    
1882
/* WSBH: swap the bytes within each halfword of the low 32 bits. */
target_ulong do_wsbh(target_ulong t1)
{
    return (int32_t)(((t1 << 8) & ~0x00FF00FF) | ((t1 >> 8) & 0x00FF00FF));
}
1886

    
1887
#if defined(TARGET_MIPS64)
1888
/* DEXT: extract a <size>-bit field of t1 starting at bit <pos> (64-bit). */
target_ulong do_dext(target_ulong t1, uint32_t pos, uint32_t size)
{
    return (t1 >> pos) & ((size < 64) ? ((1ULL << size) - 1) : ~0ULL);
}
1892

    
1893
/* DINS: insert the low <size> bits of t1 into t0 at bit <pos> (64-bit). */
target_ulong do_dins(target_ulong t0, target_ulong t1, uint32_t pos, uint32_t size)
{
    target_ulong mask = ((size < 64) ? ((1ULL << size) - 1) : ~0ULL) << pos;

    return (t0 & ~mask) | ((t1 << pos) & mask);
}
1899

    
1900
/* DSBH: swap the bytes within each halfword of the 64-bit value. */
target_ulong do_dsbh(target_ulong t1)
{
    return ((t1 << 8) & ~0x00FF00FF00FF00FFULL) | ((t1 >> 8) & 0x00FF00FF00FF00FFULL);
}
1904

    
1905
/* DSHD: swap the halfwords within each word, then swap the two words,
   i.e. fully reverse the four halfwords of the 64-bit value. */
target_ulong do_dshd(target_ulong t1)
{
    t1 = ((t1 << 16) & ~0x0000FFFF0000FFFFULL) | ((t1 >> 16) & 0x0000FFFF0000FFFFULL);
    return (t1 << 32) | (t1 >> 32);
}
1910
#endif
1911

    
1912
/* Minimal PMON firmware-call emulation for bare-metal guests.
   <function> is the raw vector offset; dividing by 2 yields the PMON
   call number.  Argument is in $4 (a0), result goes to $2 (v0). */
void do_pmon (int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0)
            env->active_tc.gpr[2] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        /* No input available: always report EOF. */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        /* outbyte: print the character in a0. */
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        /* _exit: ignored. */
        break;
    case 158:
        {
            /* outstring: a0 holds a guest pointer to a NUL-terminated
               string (guest RAM is directly addressable here). */
            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}
1937

    
1938
/* WAIT: halt the CPU until the next interrupt by raising EXCP_HLT
   (which exits the cpu loop; do_raise_exception does not return). */
void do_wait (void)
{
    env->halted = 1;
    do_raise_exception(EXCP_HLT);
}
1943

    
1944
#if !defined(CONFIG_USER_ONLY)
1945

    
1946
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
1947

    
1948
#define MMUSUFFIX _mmu
1949
#define ALIGNED_ONLY
1950

    
1951
#define SHIFT 0
1952
#include "softmmu_template.h"
1953

    
1954
#define SHIFT 1
1955
#include "softmmu_template.h"
1956

    
1957
#define SHIFT 2
1958
#include "softmmu_template.h"
1959

    
1960
#define SHIFT 3
1961
#include "softmmu_template.h"
1962

    
1963
/* softmmu callback for an unaligned access: record the faulting
   address, restore the guest PC from the host return address, and
   raise an address-error exception (AdES on store, AdEL on load). */
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (retaddr);
    do_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}
1969

    
1970
/* softmmu TLB-miss handler: try to resolve <addr> through the guest
   MMU.  On failure, restore the guest CPU state from the host return
   address (when called from generated code) and raise the guest
   exception prepared by cpu_mips_handle_mmu_fault. */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Does not return: longjmps back to the cpu loop. */
        do_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
1997

    
1998
/* Access to an unassigned physical address: raise a bus error —
   IBE for instruction fetches, DBE for data accesses. */
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int unused)
{
    do_raise_exception(is_exec ? EXCP_IBE : EXCP_DBE);
}
2006
#endif /* !CONFIG_USER_ONLY */
2007

    
2008
/* Complex FPU operations which may need stack space. */
2009

    
2010
#define FLOAT_ONE32 make_float32(0x3f8 << 20)
2011
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2012
#define FLOAT_TWO32 make_float32(1 << 30)
2013
#define FLOAT_TWO64 make_float64(1ULL << 62)
2014
#define FLOAT_QNAN32 0x7fbfffff
2015
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2016
#define FLOAT_SNAN32 0x7fffffff
2017
#define FLOAT_SNAN64 0x7fffffffffffffffULL
2018

    
2019
/* convert MIPS rounding mode in FCR31 to IEEE library */
2020
/* convert MIPS rounding mode in FCR31 to IEEE library */
/* Index is FCR31.RM (0-3): RN, RZ, RP, RM. */
unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};
2026

    
2027
#define RESTORE_ROUNDING_MODE \
2028
    set_float_rounding_mode(ieee_rm[env->fpu->fcr31 & 3], &env->fpu->fp_status)
2029

    
2030
/* CFC1: read an FPU control register.  Registers 25 (FCCR), 26 (FEXR)
   and 28 (FENR) are views onto fields of FCR31; 0 is FIR (fcr0);
   anything else reads FCR31 itself. */
target_ulong do_cfc1 (uint32_t reg)
{
    target_ulong t0;

    switch (reg) {
    case 0:
        t0 = (int32_t)env->fpu->fcr0;
        break;
    case 25:
        /* FCCR: condition codes 7..1 plus CC0. */
        t0 = ((env->fpu->fcr31 >> 24) & 0xfe) | ((env->fpu->fcr31 >> 23) & 0x1);
        break;
    case 26:
        /* FEXR: cause and flags fields. */
        t0 = env->fpu->fcr31 & 0x0003f07c;
        break;
    case 28:
        /* FENR: enables, rounding mode and FS bit. */
        t0 = (env->fpu->fcr31 & 0x00000f83) | ((env->fpu->fcr31 >> 22) & 0x4);
        break;
    default:
        t0 = (int32_t)env->fpu->fcr31;
        break;
    }

    return t0;
}
2054

    
2055
/* CTC1: write an FPU control register.  Registers 25/26/28 scatter
   their fields back into FCR31; 31 writes FCR31 directly.  Writes
   with reserved bits set are silently ignored.  Afterwards the host
   rounding mode is refreshed and an FP exception is raised if an
   enabled (or unimplemented-op) cause bit is pending. */
void do_ctc1 (target_ulong t0, uint32_t reg)
{
    switch(reg) {
    case 25:
        if (t0 & 0xffffff00)
            return;
        /* FCCR -> FCR31 condition-code bits. */
        env->fpu->fcr31 = (env->fpu->fcr31 & 0x017fffff) | ((t0 & 0xfe) << 24) |
                     ((t0 & 0x1) << 23);
        break;
    case 26:
        if (t0 & 0x007c0000)
            return;
        /* FEXR -> FCR31 cause and flags fields. */
        env->fpu->fcr31 = (env->fpu->fcr31 & 0xfffc0f83) | (t0 & 0x0003f07c);
        break;
    case 28:
        if (t0 & 0x007c0000)
            return;
        /* FENR -> FCR31 enables, rounding mode and FS bit. */
        env->fpu->fcr31 = (env->fpu->fcr31 & 0xfefff07c) | (t0 & 0x00000f83) |
                     ((t0 & 0x4) << 22);
        break;
    case 31:
        if (t0 & 0x007c0000)
            return;
        env->fpu->fcr31 = t0;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    set_float_exception_flags(0, &env->fpu->fp_status);
    /* 0x20 covers the unimplemented-operation cause, which is always
       enabled. */
    if ((GET_FP_ENABLE(env->fpu->fcr31) | 0x20) & GET_FP_CAUSE(env->fpu->fcr31))
        do_raise_exception(EXCP_FPE);
}
2089

    
2090
/* Map softfloat exception flag bits to the MIPS FCR31 cause/flag bit
   layout (inverse of mips_ex_to_ieee below). */
static always_inline char ieee_ex_to_mips(char xcpt)
{
    return (xcpt & float_flag_inexact) >> 5 |
           (xcpt & float_flag_underflow) >> 3 |
           (xcpt & float_flag_overflow) >> 1 |
           (xcpt & float_flag_divbyzero) << 1 |
           (xcpt & float_flag_invalid) << 4;
}
2098

    
2099
/* Map MIPS FCR31 cause/flag bits to the softfloat exception flag
   layout (inverse of ieee_ex_to_mips above). */
static always_inline char mips_ex_to_ieee(char xcpt)
{
    return (xcpt & FP_INEXACT) << 5 |
           (xcpt & FP_UNDERFLOW) << 3 |
           (xcpt & FP_OVERFLOW) << 1 |
           (xcpt & FP_DIV0) >> 1 |
           (xcpt & FP_INVALID) >> 4;
}
2107

    
2108
/* After an FP operation: copy the accumulated softfloat exceptions
   into FCR31.Cause; raise an FP exception if any enabled cause is
   set, otherwise accumulate them into the sticky Flags field. */
static always_inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->fpu->fp_status));

    SET_FP_CAUSE(env->fpu->fcr31, tmp);
    if (GET_FP_ENABLE(env->fpu->fcr31) & tmp)
        do_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->fpu->fcr31, tmp);
}
2118

    
2119
/* Float support.
   Single precision routines have a "s" suffix, double precision a
   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
   paired single lower "pl", paired single upper "pu".  */

/* Expands to the signature of an FP helper: void do_float_<name>_<p>(void).
   Operands and results travel through the global FP temporaries
   (FST0/FDT0/WT0/... in, FST2/FDT2/WT2/... out).  */
#define FLOAT_OP(name, p) void do_float_##name##_##p(void)
2125

    
2126
/* unary operations, modifying fp status  */
/* Defines do_float_<name>_d and do_float_<name>_s, each applying the
   corresponding softfloat routine to the FP operand temporaries.
   These pass fp_status, so they can raise softfloat exception flags.  */
#define FLOAT_UNOP(name)  \
FLOAT_OP(name, d)         \
{                         \
    FDT2 = float64_ ## name(FDT0, &env->fpu->fp_status); \
}                         \
FLOAT_OP(name, s)         \
{                         \
    FST2 = float32_ ## name(FST0, &env->fpu->fp_status); \
}
FLOAT_UNOP(sqrt)
#undef FLOAT_UNOP
2138

    
2139
/* Conversions to double / 64-bit integer.  Each helper clears the
   softfloat exception flags, converts, then folds the resulting flags
   back into FCR31 via update_fcr31().  */

/* cvt.d.s: single -> double.  */
FLOAT_OP(cvtd, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float32_to_float64(FST0, &env->fpu->fp_status);
    update_fcr31();
}
/* cvt.d.w: 32-bit integer -> double.  */
FLOAT_OP(cvtd, w)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = int32_to_float64(WT0, &env->fpu->fp_status);
    update_fcr31();
}
/* cvt.d.l: 64-bit integer -> double.  */
FLOAT_OP(cvtd, l)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = int64_to_float64(DT0, &env->fpu->fp_status);
    update_fcr31();
}
/* cvt.l.d: double -> 64-bit integer; on overflow or invalid operation
   the result is forced to the FLOAT_SNAN64 bit pattern.  */
FLOAT_OP(cvtl, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
/* cvt.l.s: single -> 64-bit integer; same error convention.  */
FLOAT_OP(cvtl, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
2173

    
2174
/* Conversions involving single precision, 32-bit integers and paired
   single.  WT*/WTH* and FST*/FSTH* hold the lower/upper halves of a
   paired-single operand (cf. the pl/pu suffix comment above).  */

/* cvt.ps.pw: pair of 32-bit integers -> paired single.  */
FLOAT_OP(cvtps, pw)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
    FSTH2 = int32_to_float32(WTH0, &env->fpu->fp_status);
    update_fcr31();
}
/* cvt.pw.ps: paired single -> pair of 32-bit integers.
   NOTE(review): only WT2 (not WTH2) is forced to FLOAT_SNAN32 on
   overflow/invalid - confirm against the architecture manual.  */
FLOAT_OP(cvtpw, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    WTH2 = float32_to_int32(FSTH0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
/* cvt.s.d: double -> single.  */
FLOAT_OP(cvts, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float64_to_float32(FDT0, &env->fpu->fp_status);
    update_fcr31();
}
/* cvt.s.w: 32-bit integer -> single.  */
FLOAT_OP(cvts, w)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
    update_fcr31();
}
/* cvt.s.l: 64-bit integer -> single.  */
FLOAT_OP(cvts, l)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int64_to_float32(DT0, &env->fpu->fp_status);
    update_fcr31();
}
/* cvt.s.pl: raw move of the lower paired-single word (no conversion).  */
FLOAT_OP(cvts, pl)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = WT0;
    update_fcr31();
}
/* cvt.s.pu: raw move of the upper paired-single word (no conversion).  */
FLOAT_OP(cvts, pu)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = WTH0;
    update_fcr31();
}
/* cvt.w.s: single -> 32-bit integer; FLOAT_SNAN32 on overflow/invalid.  */
FLOAT_OP(cvtw, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
/* cvt.w.d: double -> 32-bit integer; same error convention.  */
FLOAT_OP(cvtw, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
}
2236

    
2237
/* round.l / round.w: float -> integer with round-to-nearest-even,
   restoring the FCR31-selected rounding mode afterwards.  Result is
   forced to the SNAN bit pattern on overflow or invalid operation.
   NOTE(review): unlike the cvt helpers these do not clear the softfloat
   exception flags first - confirm stale flags cannot leak in here.  */
FLOAT_OP(roundl, d)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(roundl, s)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(roundw, d)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(roundw, s)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
2273

    
2274
/* trunc.l / trunc.w: float -> integer with round-toward-zero, using
   softfloat's *_round_to_zero routines so no mode switch is needed.
   Result forced to the SNAN bit pattern on overflow/invalid.
   NOTE(review): exception flags are not cleared beforehand here either -
   confirm stale flags cannot leak in.  */
FLOAT_OP(truncl, d)
{
    DT2 = float64_to_int64_round_to_zero(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(truncl, s)
{
    DT2 = float32_to_int64_round_to_zero(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(truncw, d)
{
    WT2 = float64_to_int32_round_to_zero(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(truncw, s)
{
    WT2 = float32_to_int32_round_to_zero(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
2302

    
2303
/* ceil.l / ceil.w: float -> integer with round-up (toward +inf),
   restoring the FCR31-selected rounding mode afterwards.  Result is
   forced to the SNAN bit pattern on overflow/invalid.  */
FLOAT_OP(ceill, d)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(ceill, s)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(ceilw, d)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(ceilw, s)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
2339

    
2340
/* floor.l / floor.w: float -> integer with round-down (toward -inf),
   restoring the FCR31-selected rounding mode afterwards.  Result is
   forced to the SNAN bit pattern on overflow/invalid.  */
FLOAT_OP(floorl, d)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(floorl, s)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(floorw, d)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(floorw, s)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
2376

    
2377
/* unary operations, not modifying fp status  */
/* abs and chs (negate) are pure sign-bit manipulations: the softfloat
   routines here take no fp_status argument, so no exception flags can
   be raised and update_fcr31() is not needed.  Also covers the paired
   single variant.  */
#define FLOAT_UNOP(name)  \
FLOAT_OP(name, d)         \
{                         \
    FDT2 = float64_ ## name(FDT0);   \
}                         \
FLOAT_OP(name, s)         \
{                         \
    FST2 = float32_ ## name(FST0);   \
}                         \
FLOAT_OP(name, ps)        \
{                         \
    FST2 = float32_ ## name(FST0);   \
    FSTH2 = float32_ ## name(FSTH0); \
}
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP
2395

    
2396
/* MIPS specific unary operations */
/* recip: 1/x.  */
FLOAT_OP(recip, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(recip, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    update_fcr31();
}

/* rsqrt: 1/sqrt(x), computed as a full sqrt followed by a divide.  */
FLOAT_OP(rsqrt, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(rsqrt, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    update_fcr31();
}
2424

    
2425
/* recip1 / rsqrt1 (reduced-precision reciprocal estimate steps):
   implemented here with the same full-precision computation as
   recip/rsqrt, plus paired-single variants.  */
FLOAT_OP(recip1, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(recip1, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(recip1, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    FSTH2 = float32_div(FLOAT_ONE32, FSTH0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(rsqrt1, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(rsqrt1, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(rsqrt1, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FSTH2 = float32_sqrt(FSTH0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    FSTH2 = float32_div(FLOAT_ONE32, FSTH2, &env->fpu->fp_status);
    update_fcr31();
}
2468

    
2469
/* binary operations */
/* Defines do_float_<name>_{d,s,ps}: clear flags, apply the softfloat
   binary op, fold flags into FCR31; on an invalid operation the
   result register(s) are forced to the QNAN bit pattern.  */
#define FLOAT_BINOP(name) \
FLOAT_OP(name, d)         \
{                         \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    FDT2 = float64_ ## name (FDT0, FDT1, &env->fpu->fp_status);    \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID)                \
        DT2 = FLOAT_QNAN64;                                        \
}                         \
FLOAT_OP(name, s)         \
{                         \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status);    \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID)                \
        WT2 = FLOAT_QNAN32;                                        \
}                         \
FLOAT_OP(name, ps)        \
{                         \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status);    \
    FSTH2 = float32_ ## name (FSTH0, FSTH1, &env->fpu->fp_status); \
    update_fcr31();       \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) {              \
        WT2 = FLOAT_QNAN32;                                        \
        WTH2 = FLOAT_QNAN32;                                       \
    }                     \
}
FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP
2503

    
2504
/* ternary operations */
/* madd/msub: result = (op0 name1 op1) name2 op2.
   NOTE(review): unlike the binary ops above, these do not clear the
   softfloat exception flags nor call update_fcr31(), and they clobber
   the first operand temporary (FDT0/FST0/FSTH0) with the intermediate
   product - confirm both are intended.  */
#define FLOAT_TERNOP(name1, name2) \
FLOAT_OP(name1 ## name2, d)        \
{                                  \
    FDT0 = float64_ ## name1 (FDT0, FDT1, &env->fpu->fp_status);    \
    FDT2 = float64_ ## name2 (FDT0, FDT2, &env->fpu->fp_status);    \
}                                  \
FLOAT_OP(name1 ## name2, s)        \
{                                  \
    FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status);    \
    FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status);    \
}                                  \
FLOAT_OP(name1 ## name2, ps)       \
{                                  \
    FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status);    \
    FSTH0 = float32_ ## name1 (FSTH0, FSTH1, &env->fpu->fp_status); \
    FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status);    \
    FSTH2 = float32_ ## name2 (FSTH0, FSTH2, &env->fpu->fp_status); \
}
FLOAT_TERNOP(mul, add)
FLOAT_TERNOP(mul, sub)
#undef FLOAT_TERNOP

/* negated ternary operations */
/* nmadd/nmsub: same as above with the final result sign-negated.
   Same flag-handling / operand-clobbering caveats as FLOAT_TERNOP.  */
#define FLOAT_NTERNOP(name1, name2) \
FLOAT_OP(n ## name1 ## name2, d)    \
{                                   \
    FDT0 = float64_ ## name1 (FDT0, FDT1, &env->fpu->fp_status);    \
    FDT2 = float64_ ## name2 (FDT0, FDT2, &env->fpu->fp_status);    \
    FDT2 = float64_chs(FDT2);       \
}                                   \
FLOAT_OP(n ## name1 ## name2, s)    \
{                                   \
    FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status);    \
    FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status);    \
    FST2 = float32_chs(FST2);       \
}                                   \
FLOAT_OP(n ## name1 ## name2, ps)   \
{                                   \
    FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status);    \
    FSTH0 = float32_ ## name1 (FSTH0, FSTH1, &env->fpu->fp_status); \
    FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status);    \
    FSTH2 = float32_ ## name2 (FSTH0, FSTH2, &env->fpu->fp_status); \
    FST2 = float32_chs(FST2);       \
    FSTH2 = float32_chs(FSTH2);     \
}
FLOAT_NTERNOP(mul, add)
FLOAT_NTERNOP(mul, sub)
#undef FLOAT_NTERNOP
2553

    
2554
/* MIPS specific binary operations */
/* recip2: second step of the reciprocal iteration, -(op0 * op2 - 1).  */
FLOAT_OP(recip2, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
    FDT2 = float64_chs(float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status));
    update_fcr31();
}
FLOAT_OP(recip2, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FST2 = float32_chs(float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status));
    update_fcr31();
}
FLOAT_OP(recip2, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
    FST2 = float32_chs(float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status));
    FSTH2 = float32_chs(float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status));
    update_fcr31();
}

/* rsqrt2: second step of the reciprocal-sqrt iteration,
   -((op0 * op2 - 1) / 2).  */
FLOAT_OP(rsqrt2, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
    FDT2 = float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status);
    FDT2 = float64_chs(float64_div(FDT2, FLOAT_TWO64, &env->fpu->fp_status));
    update_fcr31();
}
FLOAT_OP(rsqrt2, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
    FST2 = float32_chs(float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status));
    update_fcr31();
}
FLOAT_OP(rsqrt2, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
    FSTH2 = float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status);
    FST2 = float32_chs(float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status));
    FSTH2 = float32_chs(float32_div(FSTH2, FLOAT_TWO32, &env->fpu->fp_status));
    update_fcr31();
}
2606

    
2607
/* addr.ps: cross-pair reduction - lower result is FST0 + FSTH0,
   upper result is FST1 + FSTH1.  */
FLOAT_OP(addr, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_add (FST0, FSTH0, &env->fpu->fp_status);
    FSTH2 = float32_add (FST1, FSTH1, &env->fpu->fp_status);
    update_fcr31();
}

/* mulr.ps: same cross-pair pattern with multiplication.  */
FLOAT_OP(mulr, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul (FST0, FSTH0, &env->fpu->fp_status);
    FSTH2 = float32_mul (FST1, FSTH1, &env->fpu->fp_status);
    update_fcr31();
}
2622

    
2623
/* compare operations */
/* FOP_COND_D(op, cond) defines two helpers:
     do_cmp_d_<op>(cc)    - evaluate "cond" on FDT0/FDT1 and set or
                            clear FP condition code "cc" accordingly;
     do_cmpabs_d_<op>(cc) - same on the absolute values (the operand
                            temporaries are overwritten with |FDT0|,
                            |FDT1| first).
   "cond" is evaluated before update_fcr31() so any invalid-operation
   flag it raises is folded into FCR31 (and may raise EXCP_FPE) before
   the condition code is written.  */
#define FOP_COND_D(op, cond)                   \
void do_cmp_d_ ## op (long cc)                 \
{                                              \
    int c = cond;                              \
    update_fcr31();                            \
    if (c)                                     \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
}                                              \
void do_cmpabs_d_ ## op (long cc)              \
{                                              \
    int c;                                     \
    FDT0 = float64_abs(FDT0);                  \
    FDT1 = float64_abs(FDT1);                  \
    c = cond;                                  \
    update_fcr31();                            \
    if (c)                                     \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
}
2646

    
2647
/* Return 1 if a or b is a NaN, i.e. the operands compare unordered.
   The invalid-operation flag is raised for any signaling NaN, and -
   when "sig" is non-zero (signaling compare variants) - for quiet
   NaNs as well.
   NOTE(review): return type is "int" while the float32 variant uses
   "flag"; harmless, but worth unifying.  */
int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
{
    if (float64_is_signaling_nan(a) ||
        float64_is_signaling_nan(b) ||
        (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float64_is_nan(a) || float64_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}
2660

    
2661
/* Quiet compares: only signaling NaNs raise invalid.
   NOTE: the comma operator makes "cond" evaluate to false,
   but float*_is_unordered() is still called for its side effect.  */
FOP_COND_D(f,   (float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status), 0))
FOP_COND_D(un,  float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status))
FOP_COND_D(eq,  !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ueq, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status)  || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(olt, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ult, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status)  || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ole, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ule, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status)  || float64_le(FDT0, FDT1, &env->fpu->fp_status))
/* Signaling compares (sig=1): any NaN raises invalid.
   NOTE: the comma operator makes "cond" evaluate to false,
   but float*_is_unordered() is still called for its side effect.  */
FOP_COND_D(sf,  (float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status), 0))
FOP_COND_D(ngle,float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status))
FOP_COND_D(seq, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ngl, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status)  || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(lt,  !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(nge, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status)  || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(le,  !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ngt, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status)  || float64_le(FDT0, FDT1, &env->fpu->fp_status))
2681

    
2682
/* Single-precision analogue of FOP_COND_D: defines do_cmp_s_<op> and
   do_cmpabs_s_<op>, which set/clear condition code "cc" from "cond"
   (the abs variant overwrites FST0/FST1 with their absolute values).  */
#define FOP_COND_S(op, cond)                   \
void do_cmp_s_ ## op (long cc)                 \
{                                              \
    int c = cond;                              \
    update_fcr31();                            \
    if (c)                                     \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
}                                              \
void do_cmpabs_s_ ## op (long cc)              \
{                                              \
    int c;                                     \
    FST0 = float32_abs(FST0);                  \
    FST1 = float32_abs(FST1);                  \
    c = cond;                                  \
    update_fcr31();                            \
    if (c)                                     \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
}
2704

    
2705
/* Return 1 if a or b is a NaN, i.e. the operands compare unordered.
   The invalid-operation flag is raised for any signaling NaN, and -
   when "sig" is non-zero (signaling compare variants) - for quiet
   NaNs as well.  */
flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
{
    if (float32_is_signaling_nan(a) ||
        float32_is_signaling_nan(b) ||
        (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float32_is_nan(a) || float32_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}
2718

    
2719
/* Quiet compares: only signaling NaNs raise invalid.
   NOTE: the comma operator makes "cond" evaluate to false,
   but float*_is_unordered() is still called for its side effect.  */
FOP_COND_S(f,   (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0))
FOP_COND_S(un,  float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status))
FOP_COND_S(eq,  !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)  || float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)  || float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)  || float32_le(FST0, FST1, &env->fpu->fp_status))
/* Signaling compares (sig=1): any NaN raises invalid.
   NOTE: the comma operator makes "cond" evaluate to false,
   but float*_is_unordered() is still called for its side effect.  */
FOP_COND_S(sf,  (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0))
FOP_COND_S(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status))
FOP_COND_S(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)  || float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(lt,  !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)  || float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(le,  !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)  || float32_le(FST0, FST1, &env->fpu->fp_status))
2739

    
2740
/* Paired-single analogue: "condl" drives condition code "cc" (lower
   half) and "condh" drives "cc + 1" (upper half).  The abs variant
   overwrites FST0/FSTH0/FST1/FSTH1 with their absolute values.  */
#define FOP_COND_PS(op, condl, condh)          \
void do_cmp_ps_ ## op (long cc)                \
{                                              \
    int cl = condl;                            \
    int ch = condh;                            \
    update_fcr31();                            \
    if (cl)                                    \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
    if (ch)                                    \
        SET_FP_COND(cc + 1, env->fpu);         \
    else                                       \
        CLEAR_FP_COND(cc + 1, env->fpu);       \
}                                              \
void do_cmpabs_ps_ ## op (long cc)             \
{                                              \
    int cl, ch;                                \
    FST0 = float32_abs(FST0);                  \
    FSTH0 = float32_abs(FSTH0);                \
    FST1 = float32_abs(FST1);                  \
    FSTH1 = float32_abs(FSTH1);                \
    cl = condl;                                \
    ch = condh;                                \
    update_fcr31();                            \
    if (cl)                                    \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
    if (ch)                                    \
        SET_FP_COND(cc + 1, env->fpu);         \
    else                                       \
        CLEAR_FP_COND(cc + 1, env->fpu);       \
}
2774

    
2775
/* NOTE: the comma operator will make "cond" to eval to false,
2776
 * but float*_is_unordered() is still called. */
2777
FOP_COND_PS(f,   (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0),
2778
                 (float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status), 0))
2779
FOP_COND_PS(un,  float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status),
2780
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status))
2781
FOP_COND_PS(eq,  !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)   && float32_eq(FST0, FST1, &env->fpu->fp_status),
2782
                 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
2783
FOP_COND_PS(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)    || float32_eq(FST0, FST1, &env->fpu->fp_status),
2784
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
2785
FOP_COND_PS(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)   && float32_lt(FST0, FST1, &env->fpu->fp_status),
2786
                 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
2787
FOP_COND_PS(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)    || float32_lt(FST0, FST1, &env->fpu->fp_status),
2788
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
2789
FOP_COND_PS(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)   && float32_le(FST0, FST1, &env->fpu->fp_status),
2790
                 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
2791
FOP_COND_PS(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)    || float32_le(FST0, FST1, &env->fpu->fp_status),
2792
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
2793
/* NOTE: the comma operator will make "cond" to eval to false,
2794
 * but float*_is_unordered() is still called. */
2795
FOP_COND_PS(sf,  (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0),
2796
                 (float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status), 0))
2797
FOP_COND_PS(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status),
2798
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status))
2799
FOP_COND_PS(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)   && float32_eq(FST0, FST1, &env->fpu->fp_status),
2800
                 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
2801
FOP_COND_PS(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)    || float32_eq(FST0, FST1, &env->fpu->fp_status),
2802
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
2803
FOP_COND_PS(lt,  !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)   && float32_lt(FST0, FST1, &env->fpu->fp_status),
2804
                 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
2805
FOP_COND_PS(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)    || float32_lt(FST0, FST1, &env->fpu->fp_status),
2806
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
2807
FOP_COND_PS(le,  !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)   && float32_le(FST0, FST1, &env->fpu->fp_status),
2808
                 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
2809
FOP_COND_PS(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)    || float32_le(FST0, FST1, &env->fpu->fp_status),
2810
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))