Statistics
| Branch: | Revision:

root / target-mips / op_helper.c @ bd7d9a6d

History | View | Annotate | Download (87.6 kB)

1
/*
2
 *  MIPS emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2004-2005 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include <stdlib.h>
21
#include "exec.h"
22

    
23
#include "host-utils.h"
24

    
25
/*****************************************************************************/
26
/* Exceptions processing helpers */
27

    
28
void do_raise_exception_err (uint32_t exception, int error_code)
29
{
30
#if 1
31
    if (logfile && exception < 0x100)
32
        fprintf(logfile, "%s: %d %d\n", __func__, exception, error_code);
33
#endif
34
    env->exception_index = exception;
35
    env->error_code = error_code;
36
    cpu_loop_exit();
37
}
38

    
39
void do_raise_exception (uint32_t exception)
40
{
41
    do_raise_exception_err(exception, 0);
42
}
43

    
44
void do_interrupt_restart (void)
45
{
46
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
47
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
48
        !(env->hflags & MIPS_HFLAG_DM) &&
49
        (env->CP0_Status & (1 << CP0St_IE)) &&
50
        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
51
        env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
52
        do_raise_exception(EXCP_EXT_INTERRUPT);
53
    }
54
}
55

    
56
void do_restore_state (void *pc_ptr)
57
{
58
    TranslationBlock *tb;
59
    unsigned long pc = (unsigned long) pc_ptr;
60
    
61
    tb = tb_find_pc (pc);
62
    if (tb) {
63
        cpu_restore_state (tb, env, pc, NULL);
64
    }
65
}
66

    
67
target_ulong do_clo (target_ulong t0)
68
{
69
    return clo32(t0);
70
}
71

    
72
target_ulong do_clz (target_ulong t0)
73
{
74
    return clz32(t0);
75
}
76

    
77
#if defined(TARGET_MIPS64)
78
target_ulong do_dclo (target_ulong t0)
79
{
80
    return clo64(t0);
81
}
82

    
83
target_ulong do_dclz (target_ulong t0)
84
{
85
    return clz64(t0);
86
}
87
#endif /* TARGET_MIPS64 */
88

    
89
/* 64 bits arithmetic for 32 bits hosts */
90
static inline uint64_t get_HILO (void)
91
{
92
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
93
}
94

    
95
static inline void set_HILO (uint64_t HILO)
96
{
97
    env->active_tc.LO[0] = (int32_t)HILO;
98
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
99
}
100

    
101
static inline void set_HIT0_LO (target_ulong t0, uint64_t HILO)
102
{
103
    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
104
    t0 = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
105
}
106

    
107
static inline void set_HI_LOT0 (target_ulong t0, uint64_t HILO)
108
{
109
    t0 = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
110
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
111
}
112

    
113
#if TARGET_LONG_BITS > HOST_LONG_BITS
114
void do_madd (target_ulong t0, target_ulong t1)
115
{
116
    int64_t tmp;
117

    
118
    tmp = ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);
119
    set_HILO((int64_t)get_HILO() + tmp);
120
}
121

    
122
void do_maddu (target_ulong t0, target_ulong t1)
123
{
124
    uint64_t tmp;
125

    
126
    tmp = ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1);
127
    set_HILO(get_HILO() + tmp);
128
}
129

    
130
void do_msub (target_ulong t0, target_ulong t1)
131
{
132
    int64_t tmp;
133

    
134
    tmp = ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);
135
    set_HILO((int64_t)get_HILO() - tmp);
136
}
137

    
138
void do_msubu (target_ulong t0, target_ulong t1)
139
{
140
    uint64_t tmp;
141

    
142
    tmp = ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1);
143
    set_HILO(get_HILO() - tmp);
144
}
145
#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
146

    
147
/* Multiplication variants of the vr54xx. */
148
target_ulong do_muls (target_ulong t0, target_ulong t1)
149
{
150
    set_HI_LOT0(t0, 0 - ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
151

    
152
    return t0;
153
}
154

    
155
target_ulong do_mulsu (target_ulong t0, target_ulong t1)
156
{
157
    set_HI_LOT0(t0, 0 - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
158

    
159
    return t0;
160
}
161

    
162
target_ulong do_macc (target_ulong t0, target_ulong t1)
163
{
164
    set_HI_LOT0(t0, ((int64_t)get_HILO()) + ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
165

    
166
    return t0;
167
}
168

    
169
target_ulong do_macchi (target_ulong t0, target_ulong t1)
170
{
171
    set_HIT0_LO(t0, ((int64_t)get_HILO()) + ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
172

    
173
    return t0;
174
}
175

    
176
target_ulong do_maccu (target_ulong t0, target_ulong t1)
177
{
178
    set_HI_LOT0(t0, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
179

    
180
    return t0;
181
}
182

    
183
target_ulong do_macchiu (target_ulong t0, target_ulong t1)
184
{
185
    set_HIT0_LO(t0, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
186

    
187
    return t0;
188
}
189

    
190
target_ulong do_msac (target_ulong t0, target_ulong t1)
191
{
192
    set_HI_LOT0(t0, ((int64_t)get_HILO()) - ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
193

    
194
    return t0;
195
}
196

    
197
target_ulong do_msachi (target_ulong t0, target_ulong t1)
198
{
199
    set_HIT0_LO(t0, ((int64_t)get_HILO()) - ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
200

    
201
    return t0;
202
}
203

    
204
target_ulong do_msacu (target_ulong t0, target_ulong t1)
205
{
206
    set_HI_LOT0(t0, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
207

    
208
    return t0;
209
}
210

    
211
target_ulong do_msachiu (target_ulong t0, target_ulong t1)
212
{
213
    set_HIT0_LO(t0, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
214

    
215
    return t0;
216
}
217

    
218
target_ulong do_mulhi (target_ulong t0, target_ulong t1)
219
{
220
    set_HIT0_LO(t0, (int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);
221

    
222
    return t0;
223
}
224

    
225
target_ulong do_mulhiu (target_ulong t0, target_ulong t1)
226
{
227
    set_HIT0_LO(t0, (uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1);
228

    
229
    return t0;
230
}
231

    
232
target_ulong do_mulshi (target_ulong t0, target_ulong t1)
233
{
234
    set_HIT0_LO(t0, 0 - ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
235

    
236
    return t0;
237
}
238

    
239
target_ulong do_mulshiu (target_ulong t0, target_ulong t1)
240
{
241
    set_HIT0_LO(t0, 0 - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
242

    
243
    return t0;
244
}
245

    
246
#ifdef TARGET_MIPS64
247
void do_dmult (target_ulong t0, target_ulong t1)
248
{
249
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), t0, t1);
250
}
251

    
252
void do_dmultu (target_ulong t0, target_ulong t1)
253
{
254
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), t0, t1);
255
}
256
#endif
257

    
258
#ifdef TARGET_WORDS_BIGENDIAN
259
#define GET_LMASK(v) ((v) & 3)
260
#define GET_OFFSET(addr, offset) (addr + (offset))
261
#else
262
#define GET_LMASK(v) (((v) & 3) ^ 3)
263
#define GET_OFFSET(addr, offset) (addr - (offset))
264
#endif
265

    
266
target_ulong do_lwl(target_ulong t0, target_ulong t1, int mem_idx)
267
{
268
    target_ulong tmp;
269

    
270
#ifdef CONFIG_USER_ONLY
271
#define ldfun ldub_raw
272
#else
273
    int (*ldfun)(target_ulong);
274

    
275
    switch (mem_idx)
276
    {
277
    case 0: ldfun = ldub_kernel; break;
278
    case 1: ldfun = ldub_super; break;
279
    default:
280
    case 2: ldfun = ldub_user; break;
281
    }
282
#endif
283
    tmp = ldfun(t0);
284
    t1 = (t1 & 0x00FFFFFF) | (tmp << 24);
285

    
286
    if (GET_LMASK(t0) <= 2) {
287
        tmp = ldfun(GET_OFFSET(t0, 1));
288
        t1 = (t1 & 0xFF00FFFF) | (tmp << 16);
289
    }
290

    
291
    if (GET_LMASK(t0) <= 1) {
292
        tmp = ldfun(GET_OFFSET(t0, 2));
293
        t1 = (t1 & 0xFFFF00FF) | (tmp << 8);
294
    }
295

    
296
    if (GET_LMASK(t0) == 0) {
297
        tmp = ldfun(GET_OFFSET(t0, 3));
298
        t1 = (t1 & 0xFFFFFF00) | tmp;
299
    }
300
    return (int32_t)t1;
301
}
302

    
303
target_ulong do_lwr(target_ulong t0, target_ulong t1, int mem_idx)
304
{
305
    target_ulong tmp;
306

    
307
#ifdef CONFIG_USER_ONLY
308
#define ldfun ldub_raw
309
#else
310
    int (*ldfun)(target_ulong);
311

    
312
    switch (mem_idx)
313
    {
314
    case 0: ldfun = ldub_kernel; break;
315
    case 1: ldfun = ldub_super; break;
316
    default:
317
    case 2: ldfun = ldub_user; break;
318
    }
319
#endif
320
    tmp = ldfun(t0);
321
    t1 = (t1 & 0xFFFFFF00) | tmp;
322

    
323
    if (GET_LMASK(t0) >= 1) {
324
        tmp = ldfun(GET_OFFSET(t0, -1));
325
        t1 = (t1 & 0xFFFF00FF) | (tmp << 8);
326
    }
327

    
328
    if (GET_LMASK(t0) >= 2) {
329
        tmp = ldfun(GET_OFFSET(t0, -2));
330
        t1 = (t1 & 0xFF00FFFF) | (tmp << 16);
331
    }
332

    
333
    if (GET_LMASK(t0) == 3) {
334
        tmp = ldfun(GET_OFFSET(t0, -3));
335
        t1 = (t1 & 0x00FFFFFF) | (tmp << 24);
336
    }
337
    return (int32_t)t1;
338
}
339

    
340
void do_swl(target_ulong t0, target_ulong t1, int mem_idx)
341
{
342
#ifdef CONFIG_USER_ONLY
343
#define stfun stb_raw
344
#else
345
    void (*stfun)(target_ulong, int);
346

    
347
    switch (mem_idx)
348
    {
349
    case 0: stfun = stb_kernel; break;
350
    case 1: stfun = stb_super; break;
351
    default:
352
    case 2: stfun = stb_user; break;
353
    }
354
#endif
355
    stfun(t0, (uint8_t)(t1 >> 24));
356

    
357
    if (GET_LMASK(t0) <= 2)
358
        stfun(GET_OFFSET(t0, 1), (uint8_t)(t1 >> 16));
359

    
360
    if (GET_LMASK(t0) <= 1)
361
        stfun(GET_OFFSET(t0, 2), (uint8_t)(t1 >> 8));
362

    
363
    if (GET_LMASK(t0) == 0)
364
        stfun(GET_OFFSET(t0, 3), (uint8_t)t1);
365
}
366

    
367
void do_swr(target_ulong t0, target_ulong t1, int mem_idx)
368
{
369
#ifdef CONFIG_USER_ONLY
370
#define stfun stb_raw
371
#else
372
    void (*stfun)(target_ulong, int);
373

    
374
    switch (mem_idx)
375
    {
376
    case 0: stfun = stb_kernel; break;
377
    case 1: stfun = stb_super; break;
378
    default:
379
    case 2: stfun = stb_user; break;
380
    }
381
#endif
382
    stfun(t0, (uint8_t)t1);
383

    
384
    if (GET_LMASK(t0) >= 1)
385
        stfun(GET_OFFSET(t0, -1), (uint8_t)(t1 >> 8));
386

    
387
    if (GET_LMASK(t0) >= 2)
388
        stfun(GET_OFFSET(t0, -2), (uint8_t)(t1 >> 16));
389

    
390
    if (GET_LMASK(t0) == 3)
391
        stfun(GET_OFFSET(t0, -3), (uint8_t)(t1 >> 24));
392
}
393

    
394
#if defined(TARGET_MIPS64)
395
/* "half" load and stores.  We must do the memory access inline,
396
   or fault handling won't work.  */
397

    
398
#ifdef TARGET_WORDS_BIGENDIAN
399
#define GET_LMASK64(v) ((v) & 7)
400
#else
401
#define GET_LMASK64(v) (((v) & 7) ^ 7)
402
#endif
403

    
404
target_ulong do_ldl(target_ulong t0, target_ulong t1, int mem_idx)
405
{
406
    uint64_t tmp;
407

    
408
#ifdef CONFIG_USER_ONLY
409
#define ldfun ldub_raw
410
#else
411
    int (*ldfun)(target_ulong);
412

    
413
    switch (mem_idx)
414
    {
415
    case 0: ldfun = ldub_kernel; break;
416
    case 1: ldfun = ldub_super; break;
417
    default:
418
    case 2: ldfun = ldub_user; break;
419
    }
420
#endif
421
    tmp = ldfun(t0);
422
    t1 = (t1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
423

    
424
    if (GET_LMASK64(t0) <= 6) {
425
        tmp = ldfun(GET_OFFSET(t0, 1));
426
        t1 = (t1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
427
    }
428

    
429
    if (GET_LMASK64(t0) <= 5) {
430
        tmp = ldfun(GET_OFFSET(t0, 2));
431
        t1 = (t1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
432
    }
433

    
434
    if (GET_LMASK64(t0) <= 4) {
435
        tmp = ldfun(GET_OFFSET(t0, 3));
436
        t1 = (t1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
437
    }
438

    
439
    if (GET_LMASK64(t0) <= 3) {
440
        tmp = ldfun(GET_OFFSET(t0, 4));
441
        t1 = (t1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
442
    }
443

    
444
    if (GET_LMASK64(t0) <= 2) {
445
        tmp = ldfun(GET_OFFSET(t0, 5));
446
        t1 = (t1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
447
    }
448

    
449
    if (GET_LMASK64(t0) <= 1) {
450
        tmp = ldfun(GET_OFFSET(t0, 6));
451
        t1 = (t1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
452
    }
453

    
454
    if (GET_LMASK64(t0) == 0) {
455
        tmp = ldfun(GET_OFFSET(t0, 7));
456
        t1 = (t1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
457
    }
458

    
459
    return t1;
460
}
461

    
462
target_ulong do_ldr(target_ulong t0, target_ulong t1, int mem_idx)
463
{
464
    uint64_t tmp;
465

    
466
#ifdef CONFIG_USER_ONLY
467
#define ldfun ldub_raw
468
#else
469
    int (*ldfun)(target_ulong);
470

    
471
    switch (mem_idx)
472
    {
473
    case 0: ldfun = ldub_kernel; break;
474
    case 1: ldfun = ldub_super; break;
475
    default:
476
    case 2: ldfun = ldub_user; break;
477
    }
478
#endif
479
    tmp = ldfun(t0);
480
    t1 = (t1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
481

    
482
    if (GET_LMASK64(t0) >= 1) {
483
        tmp = ldfun(GET_OFFSET(t0, -1));
484
        t1 = (t1 & 0xFFFFFFFFFFFF00FFULL) | (tmp  << 8);
485
    }
486

    
487
    if (GET_LMASK64(t0) >= 2) {
488
        tmp = ldfun(GET_OFFSET(t0, -2));
489
        t1 = (t1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
490
    }
491

    
492
    if (GET_LMASK64(t0) >= 3) {
493
        tmp = ldfun(GET_OFFSET(t0, -3));
494
        t1 = (t1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
495
    }
496

    
497
    if (GET_LMASK64(t0) >= 4) {
498
        tmp = ldfun(GET_OFFSET(t0, -4));
499
        t1 = (t1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
500
    }
501

    
502
    if (GET_LMASK64(t0) >= 5) {
503
        tmp = ldfun(GET_OFFSET(t0, -5));
504
        t1 = (t1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
505
    }
506

    
507
    if (GET_LMASK64(t0) >= 6) {
508
        tmp = ldfun(GET_OFFSET(t0, -6));
509
        t1 = (t1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
510
    }
511

    
512
    if (GET_LMASK64(t0) == 7) {
513
        tmp = ldfun(GET_OFFSET(t0, -7));
514
        t1 = (t1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
515
    }
516

    
517
    return t1;
518
}
519

    
520
void do_sdl(target_ulong t0, target_ulong t1, int mem_idx)
521
{
522
#ifdef CONFIG_USER_ONLY
523
#define stfun stb_raw
524
#else
525
    void (*stfun)(target_ulong, int);
526

    
527
    switch (mem_idx)
528
    {
529
    case 0: stfun = stb_kernel; break;
530
    case 1: stfun = stb_super; break;
531
    default:
532
    case 2: stfun = stb_user; break;
533
    }
534
#endif
535
    stfun(t0, (uint8_t)(t1 >> 56));
536

    
537
    if (GET_LMASK64(t0) <= 6)
538
        stfun(GET_OFFSET(t0, 1), (uint8_t)(t1 >> 48));
539

    
540
    if (GET_LMASK64(t0) <= 5)
541
        stfun(GET_OFFSET(t0, 2), (uint8_t)(t1 >> 40));
542

    
543
    if (GET_LMASK64(t0) <= 4)
544
        stfun(GET_OFFSET(t0, 3), (uint8_t)(t1 >> 32));
545

    
546
    if (GET_LMASK64(t0) <= 3)
547
        stfun(GET_OFFSET(t0, 4), (uint8_t)(t1 >> 24));
548

    
549
    if (GET_LMASK64(t0) <= 2)
550
        stfun(GET_OFFSET(t0, 5), (uint8_t)(t1 >> 16));
551

    
552
    if (GET_LMASK64(t0) <= 1)
553
        stfun(GET_OFFSET(t0, 6), (uint8_t)(t1 >> 8));
554

    
555
    if (GET_LMASK64(t0) <= 0)
556
        stfun(GET_OFFSET(t0, 7), (uint8_t)t1);
557
}
558

    
559
void do_sdr(target_ulong t0, target_ulong t1, int mem_idx)
560
{
561
#ifdef CONFIG_USER_ONLY
562
#define stfun stb_raw
563
#else
564
    void (*stfun)(target_ulong, int);
565

    
566
    switch (mem_idx)
567
    {
568
    case 0: stfun = stb_kernel; break;
569
    case 1: stfun = stb_super; break;
570
     default:
571
    case 2: stfun = stb_user; break;
572
    }
573
#endif
574
    stfun(t0, (uint8_t)t1);
575

    
576
    if (GET_LMASK64(t0) >= 1)
577
        stfun(GET_OFFSET(t0, -1), (uint8_t)(t1 >> 8));
578

    
579
    if (GET_LMASK64(t0) >= 2)
580
        stfun(GET_OFFSET(t0, -2), (uint8_t)(t1 >> 16));
581

    
582
    if (GET_LMASK64(t0) >= 3)
583
        stfun(GET_OFFSET(t0, -3), (uint8_t)(t1 >> 24));
584

    
585
    if (GET_LMASK64(t0) >= 4)
586
        stfun(GET_OFFSET(t0, -4), (uint8_t)(t1 >> 32));
587

    
588
    if (GET_LMASK64(t0) >= 5)
589
        stfun(GET_OFFSET(t0, -5), (uint8_t)(t1 >> 40));
590

    
591
    if (GET_LMASK64(t0) >= 6)
592
        stfun(GET_OFFSET(t0, -6), (uint8_t)(t1 >> 48));
593

    
594
    if (GET_LMASK64(t0) == 7)
595
        stfun(GET_OFFSET(t0, -7), (uint8_t)(t1 >> 56));
596
}
597
#endif /* TARGET_MIPS64 */
598

    
599
#ifndef CONFIG_USER_ONLY
600
/* CP0 helpers */
601
target_ulong do_mfc0_mvpcontrol (void)
602
{
603
    return env->mvp->CP0_MVPControl;
604
}
605

    
606
target_ulong do_mfc0_mvpconf0 (void)
607
{
608
    return env->mvp->CP0_MVPConf0;
609
}
610

    
611
target_ulong do_mfc0_mvpconf1 (void)
612
{
613
    return env->mvp->CP0_MVPConf1;
614
}
615

    
616
target_ulong do_mfc0_random (void)
617
{
618
    return (int32_t)cpu_mips_get_random(env);
619
}
620

    
621
target_ulong do_mfc0_tcstatus (void)
622
{
623
    return env->active_tc.CP0_TCStatus;
624
}
625

    
626
target_ulong do_mftc0_tcstatus(void)
627
{
628
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
629

    
630
    if (other_tc == env->current_tc)
631
        return env->active_tc.CP0_TCStatus;
632
    else
633
        return env->tcs[other_tc].CP0_TCStatus;
634
}
635

    
636
target_ulong do_mfc0_tcbind (void)
637
{
638
    return env->active_tc.CP0_TCBind;
639
}
640

    
641
target_ulong do_mftc0_tcbind(void)
642
{
643
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
644

    
645
    if (other_tc == env->current_tc)
646
        return env->active_tc.CP0_TCBind;
647
    else
648
        return env->tcs[other_tc].CP0_TCBind;
649
}
650

    
651
target_ulong do_mfc0_tcrestart (void)
652
{
653
    return env->active_tc.PC;
654
}
655

    
656
target_ulong do_mftc0_tcrestart(void)
657
{
658
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
659

    
660
    if (other_tc == env->current_tc)
661
        return env->active_tc.PC;
662
    else
663
        return env->tcs[other_tc].PC;
664
}
665

    
666
target_ulong do_mfc0_tchalt (void)
667
{
668
    return env->active_tc.CP0_TCHalt;
669
}
670

    
671
target_ulong do_mftc0_tchalt(void)
672
{
673
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
674

    
675
    if (other_tc == env->current_tc)
676
        return env->active_tc.CP0_TCHalt;
677
    else
678
        return env->tcs[other_tc].CP0_TCHalt;
679
}
680

    
681
target_ulong do_mfc0_tccontext (void)
682
{
683
    return env->active_tc.CP0_TCContext;
684
}
685

    
686
target_ulong do_mftc0_tccontext(void)
687
{
688
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
689

    
690
    if (other_tc == env->current_tc)
691
        return env->active_tc.CP0_TCContext;
692
    else
693
        return env->tcs[other_tc].CP0_TCContext;
694
}
695

    
696
target_ulong do_mfc0_tcschedule (void)
697
{
698
    return env->active_tc.CP0_TCSchedule;
699
}
700

    
701
target_ulong do_mftc0_tcschedule(void)
702
{
703
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
704

    
705
    if (other_tc == env->current_tc)
706
        return env->active_tc.CP0_TCSchedule;
707
    else
708
        return env->tcs[other_tc].CP0_TCSchedule;
709
}
710

    
711
target_ulong do_mfc0_tcschefback (void)
712
{
713
    return env->active_tc.CP0_TCScheFBack;
714
}
715

    
716
target_ulong do_mftc0_tcschefback(void)
717
{
718
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
719

    
720
    if (other_tc == env->current_tc)
721
        return env->active_tc.CP0_TCScheFBack;
722
    else
723
        return env->tcs[other_tc].CP0_TCScheFBack;
724
}
725

    
726
target_ulong do_mfc0_count (void)
727
{
728
    return (int32_t)cpu_mips_get_count(env);
729
}
730

    
731
target_ulong do_mftc0_entryhi(void)
732
{
733
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
734
    int32_t tcstatus;
735

    
736
    if (other_tc == env->current_tc)
737
        tcstatus = env->active_tc.CP0_TCStatus;
738
    else
739
        tcstatus = env->tcs[other_tc].CP0_TCStatus;
740

    
741
    return (env->CP0_EntryHi & ~0xff) | (tcstatus & 0xff);
742
}
743

    
744
target_ulong do_mftc0_status(void)
745
{
746
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
747
    target_ulong t0;
748
    int32_t tcstatus;
749

    
750
    if (other_tc == env->current_tc)
751
        tcstatus = env->active_tc.CP0_TCStatus;
752
    else
753
        tcstatus = env->tcs[other_tc].CP0_TCStatus;
754

    
755
    t0 = env->CP0_Status & ~0xf1000018;
756
    t0 |= tcstatus & (0xf << CP0TCSt_TCU0);
757
    t0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
758
    t0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);
759

    
760
    return t0;
761
}
762

    
763
target_ulong do_mfc0_lladdr (void)
764
{
765
    return (int32_t)env->CP0_LLAddr >> 4;
766
}
767

    
768
target_ulong do_mfc0_watchlo (uint32_t sel)
769
{
770
    return (int32_t)env->CP0_WatchLo[sel];
771
}
772

    
773
target_ulong do_mfc0_watchhi (uint32_t sel)
774
{
775
    return env->CP0_WatchHi[sel];
776
}
777

    
778
target_ulong do_mfc0_debug (void)
779
{
780
    target_ulong t0 = env->CP0_Debug;
781
    if (env->hflags & MIPS_HFLAG_DM)
782
        t0 |= 1 << CP0DB_DM;
783

    
784
    return t0;
785
}
786

    
787
target_ulong do_mftc0_debug(void)
788
{
789
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
790
    int32_t tcstatus;
791

    
792
    if (other_tc == env->current_tc)
793
        tcstatus = env->active_tc.CP0_Debug_tcstatus;
794
    else
795
        tcstatus = env->tcs[other_tc].CP0_Debug_tcstatus;
796

    
797
    /* XXX: Might be wrong, check with EJTAG spec. */
798
    return (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
799
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
800
}
801

    
802
#if defined(TARGET_MIPS64)
803
target_ulong do_dmfc0_tcrestart (void)
804
{
805
    return env->active_tc.PC;
806
}
807

    
808
target_ulong do_dmfc0_tchalt (void)
809
{
810
    return env->active_tc.CP0_TCHalt;
811
}
812

    
813
target_ulong do_dmfc0_tccontext (void)
814
{
815
    return env->active_tc.CP0_TCContext;
816
}
817

    
818
target_ulong do_dmfc0_tcschedule (void)
819
{
820
    return env->active_tc.CP0_TCSchedule;
821
}
822

    
823
target_ulong do_dmfc0_tcschefback (void)
824
{
825
    return env->active_tc.CP0_TCScheFBack;
826
}
827

    
828
target_ulong do_dmfc0_lladdr (void)
829
{
830
    return env->CP0_LLAddr >> 4;
831
}
832

    
833
target_ulong do_dmfc0_watchlo (uint32_t sel)
834
{
835
    return env->CP0_WatchLo[sel];
836
}
837
#endif /* TARGET_MIPS64 */
838

    
839
void do_mtc0_index (target_ulong t0)
840
{
841
    int num = 1;
842
    unsigned int tmp = env->tlb->nb_tlb;
843

    
844
    do {
845
        tmp >>= 1;
846
        num <<= 1;
847
    } while (tmp);
848
    env->CP0_Index = (env->CP0_Index & 0x80000000) | (t0 & (num - 1));
849
}
850

    
851
void do_mtc0_mvpcontrol (target_ulong t0)
852
{
853
    uint32_t mask = 0;
854
    uint32_t newval;
855

    
856
    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
857
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
858
                (1 << CP0MVPCo_EVP);
859
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
860
        mask |= (1 << CP0MVPCo_STLB);
861
    newval = (env->mvp->CP0_MVPControl & ~mask) | (t0 & mask);
862

    
863
    // TODO: Enable/disable shared TLB, enable/disable VPEs.
864

    
865
    env->mvp->CP0_MVPControl = newval;
866
}
867

    
868
void do_mtc0_vpecontrol (target_ulong t0)
869
{
870
    uint32_t mask;
871
    uint32_t newval;
872

    
873
    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
874
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
875
    newval = (env->CP0_VPEControl & ~mask) | (t0 & mask);
876

    
877
    /* Yield scheduler intercept not implemented. */
878
    /* Gating storage scheduler intercept not implemented. */
879

    
880
    // TODO: Enable/disable TCs.
881

    
882
    env->CP0_VPEControl = newval;
883
}
884

    
885
void do_mtc0_vpeconf0 (target_ulong t0)
886
{
887
    uint32_t mask = 0;
888
    uint32_t newval;
889

    
890
    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
891
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
892
            mask |= (0xff << CP0VPEC0_XTC);
893
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
894
    }
895
    newval = (env->CP0_VPEConf0 & ~mask) | (t0 & mask);
896

    
897
    // TODO: TC exclusive handling due to ERL/EXL.
898

    
899
    env->CP0_VPEConf0 = newval;
900
}
901

    
902
void do_mtc0_vpeconf1 (target_ulong t0)
903
{
904
    uint32_t mask = 0;
905
    uint32_t newval;
906

    
907
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
908
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
909
                (0xff << CP0VPEC1_NCP1);
910
    newval = (env->CP0_VPEConf1 & ~mask) | (t0 & mask);
911

    
912
    /* UDI not implemented. */
913
    /* CP2 not implemented. */
914

    
915
    // TODO: Handle FPU (CP1) binding.
916

    
917
    env->CP0_VPEConf1 = newval;
918
}
919

    
920
void do_mtc0_yqmask (target_ulong t0)
921
{
922
    /* Yield qualifier inputs not implemented. */
923
    env->CP0_YQMask = 0x00000000;
924
}
925

    
926
void do_mtc0_vpeopt (target_ulong t0)
927
{
928
    env->CP0_VPEOpt = t0 & 0x0000ffff;
929
}
930

    
931
void do_mtc0_entrylo0 (target_ulong t0)
932
{
933
    /* Large physaddr (PABITS) not implemented */
934
    /* 1k pages not implemented */
935
    env->CP0_EntryLo0 = t0 & 0x3FFFFFFF;
936
}
937

    
938
void do_mtc0_tcstatus (target_ulong t0)
939
{
940
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
941
    uint32_t newval;
942

    
943
    newval = (env->active_tc.CP0_TCStatus & ~mask) | (t0 & mask);
944

    
945
    // TODO: Sync with CP0_Status.
946

    
947
    env->active_tc.CP0_TCStatus = newval;
948
}
949

    
950
void do_mttc0_tcstatus (target_ulong t0)
951
{
952
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
953

    
954
    // TODO: Sync with CP0_Status.
955

    
956
    if (other_tc == env->current_tc)
957
        env->active_tc.CP0_TCStatus = t0;
958
    else
959
        env->tcs[other_tc].CP0_TCStatus = t0;
960
}
961

    
962
void do_mtc0_tcbind (target_ulong t0)
963
{
964
    uint32_t mask = (1 << CP0TCBd_TBE);
965
    uint32_t newval;
966

    
967
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
968
        mask |= (1 << CP0TCBd_CurVPE);
969
    newval = (env->active_tc.CP0_TCBind & ~mask) | (t0 & mask);
970
    env->active_tc.CP0_TCBind = newval;
971
}
972

    
973
void do_mttc0_tcbind (target_ulong t0)
974
{
975
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
976
    uint32_t mask = (1 << CP0TCBd_TBE);
977
    uint32_t newval;
978

    
979
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
980
        mask |= (1 << CP0TCBd_CurVPE);
981
    if (other_tc == env->current_tc) {
982
        newval = (env->active_tc.CP0_TCBind & ~mask) | (t0 & mask);
983
        env->active_tc.CP0_TCBind = newval;
984
    } else {
985
        newval = (env->tcs[other_tc].CP0_TCBind & ~mask) | (t0 & mask);
986
        env->tcs[other_tc].CP0_TCBind = newval;
987
    }
988
}
989

    
990
void do_mtc0_tcrestart (target_ulong t0)
991
{
992
    env->active_tc.PC = t0;
993
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
994
    env->CP0_LLAddr = 0ULL;
995
    /* MIPS16 not implemented. */
996
}
997

    
998
void do_mttc0_tcrestart (target_ulong t0)
999
{
1000
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1001

    
1002
    if (other_tc == env->current_tc) {
1003
        env->active_tc.PC = t0;
1004
        env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1005
        env->CP0_LLAddr = 0ULL;
1006
        /* MIPS16 not implemented. */
1007
    } else {
1008
        env->tcs[other_tc].PC = t0;
1009
        env->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1010
        env->CP0_LLAddr = 0ULL;
1011
        /* MIPS16 not implemented. */
1012
    }
1013
}
1014

    
1015
void do_mtc0_tchalt (target_ulong t0)
1016
{
1017
    env->active_tc.CP0_TCHalt = t0 & 0x1;
1018

    
1019
    // TODO: Halt TC / Restart (if allocated+active) TC.
1020
}
1021

    
1022
void do_mttc0_tchalt (target_ulong t0)
1023
{
1024
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1025

    
1026
    // TODO: Halt TC / Restart (if allocated+active) TC.
1027

    
1028
    if (other_tc == env->current_tc)
1029
        env->active_tc.CP0_TCHalt = t0;
1030
    else
1031
        env->tcs[other_tc].CP0_TCHalt = t0;
1032
}
1033

    
1034
void do_mtc0_tccontext (target_ulong t0)
1035
{
1036
    env->active_tc.CP0_TCContext = t0;
1037
}
1038

    
1039
void do_mttc0_tccontext (target_ulong t0)
1040
{
1041
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1042

    
1043
    if (other_tc == env->current_tc)
1044
        env->active_tc.CP0_TCContext = t0;
1045
    else
1046
        env->tcs[other_tc].CP0_TCContext = t0;
1047
}
1048

    
1049
void do_mtc0_tcschedule (target_ulong t0)
1050
{
1051
    env->active_tc.CP0_TCSchedule = t0;
1052
}
1053

    
1054
void do_mttc0_tcschedule (target_ulong t0)
1055
{
1056
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1057

    
1058
    if (other_tc == env->current_tc)
1059
        env->active_tc.CP0_TCSchedule = t0;
1060
    else
1061
        env->tcs[other_tc].CP0_TCSchedule = t0;
1062
}
1063

    
1064
void do_mtc0_tcschefback (target_ulong t0)
1065
{
1066
    env->active_tc.CP0_TCScheFBack = t0;
1067
}
1068

    
1069
void do_mttc0_tcschefback (target_ulong t0)
1070
{
1071
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1072

    
1073
    if (other_tc == env->current_tc)
1074
        env->active_tc.CP0_TCScheFBack = t0;
1075
    else
1076
        env->tcs[other_tc].CP0_TCScheFBack = t0;
1077
}
1078

    
1079
void do_mtc0_entrylo1 (target_ulong t0)
1080
{
1081
    /* Large physaddr (PABITS) not implemented */
1082
    /* 1k pages not implemented */
1083
    env->CP0_EntryLo1 = t0 & 0x3FFFFFFF;
1084
}
1085

    
1086
void do_mtc0_context (target_ulong t0)
1087
{
1088
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (t0 & ~0x007FFFFF);
1089
}
1090

    
1091
void do_mtc0_pagemask (target_ulong t0)
1092
{
1093
    /* 1k pages not implemented */
1094
    env->CP0_PageMask = t0 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
1095
}
1096

    
1097
void do_mtc0_pagegrain (target_ulong t0)
1098
{
1099
    /* SmartMIPS not implemented */
1100
    /* Large physaddr (PABITS) not implemented */
1101
    /* 1k pages not implemented */
1102
    env->CP0_PageGrain = 0;
1103
}
1104

    
1105
void do_mtc0_wired (target_ulong t0)
1106
{
1107
    env->CP0_Wired = t0 % env->tlb->nb_tlb;
1108
}
1109

    
1110
void do_mtc0_srsconf0 (target_ulong t0)
1111
{
1112
    env->CP0_SRSConf0 |= t0 & env->CP0_SRSConf0_rw_bitmask;
1113
}
1114

    
1115
void do_mtc0_srsconf1 (target_ulong t0)
1116
{
1117
    env->CP0_SRSConf1 |= t0 & env->CP0_SRSConf1_rw_bitmask;
1118
}
1119

    
1120
void do_mtc0_srsconf2 (target_ulong t0)
1121
{
1122
    env->CP0_SRSConf2 |= t0 & env->CP0_SRSConf2_rw_bitmask;
1123
}
1124

    
1125
void do_mtc0_srsconf3 (target_ulong t0)
1126
{
1127
    env->CP0_SRSConf3 |= t0 & env->CP0_SRSConf3_rw_bitmask;
1128
}
1129

    
1130
void do_mtc0_srsconf4 (target_ulong t0)
1131
{
1132
    env->CP0_SRSConf4 |= t0 & env->CP0_SRSConf4_rw_bitmask;
1133
}
1134

    
1135
void do_mtc0_hwrena (target_ulong t0)
1136
{
1137
    env->CP0_HWREna = t0 & 0x0000000F;
1138
}
1139

    
1140
void do_mtc0_count (target_ulong t0)
1141
{
1142
    cpu_mips_store_count(env, t0);
1143
}
1144

    
1145
void do_mtc0_entryhi (target_ulong t0)
1146
{
1147
    target_ulong old, val;
1148

    
1149
    /* 1k pages not implemented */
1150
    val = t0 & ((TARGET_PAGE_MASK << 1) | 0xFF);
1151
#if defined(TARGET_MIPS64)
1152
    val &= env->SEGMask;
1153
#endif
1154
    old = env->CP0_EntryHi;
1155
    env->CP0_EntryHi = val;
1156
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1157
        uint32_t tcst = env->active_tc.CP0_TCStatus & ~0xff;
1158
        env->active_tc.CP0_TCStatus = tcst | (val & 0xff);
1159
    }
1160
    /* If the ASID changes, flush qemu's TLB.  */
1161
    if ((old & 0xFF) != (val & 0xFF))
1162
        cpu_mips_tlb_flush(env, 1);
1163
}
1164

    
1165
void do_mttc0_entryhi(target_ulong t0)
1166
{
1167
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1168
    int32_t tcstatus;
1169

    
1170
    env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (t0 & ~0xff);
1171
    if (other_tc == env->current_tc) {
1172
        tcstatus = (env->active_tc.CP0_TCStatus & ~0xff) | (t0 & 0xff);
1173
        env->active_tc.CP0_TCStatus = tcstatus;
1174
    } else {
1175
        tcstatus = (env->tcs[other_tc].CP0_TCStatus & ~0xff) | (t0 & 0xff);
1176
        env->tcs[other_tc].CP0_TCStatus = tcstatus;
1177
    }
1178
}
1179

    
1180
void do_mtc0_compare (target_ulong t0)
1181
{
1182
    cpu_mips_store_compare(env, t0);
1183
}
1184

    
1185
void do_mtc0_status (target_ulong t0)
1186
{
1187
    uint32_t val, old;
1188
    uint32_t mask = env->CP0_Status_rw_bitmask;
1189

    
1190
    val = t0 & mask;
1191
    old = env->CP0_Status;
1192
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
1193
    compute_hflags(env);
1194
    if (loglevel & CPU_LOG_EXEC)
1195
        do_mtc0_status_debug(old, val);
1196
    cpu_mips_update_irq(env);
1197
}
1198

    
1199
void do_mttc0_status(target_ulong t0)
1200
{
1201
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1202
    int32_t tcstatus = env->tcs[other_tc].CP0_TCStatus;
1203

    
1204
    env->CP0_Status = t0 & ~0xf1000018;
1205
    tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (t0 & (0xf << CP0St_CU0));
1206
    tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((t0 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
1207
    tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((t0 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
1208
    if (other_tc == env->current_tc)
1209
        env->active_tc.CP0_TCStatus = tcstatus;
1210
    else
1211
        env->tcs[other_tc].CP0_TCStatus = tcstatus;
1212
}
1213

    
1214
void do_mtc0_intctl (target_ulong t0)
1215
{
1216
    /* vectored interrupts not implemented, no performance counters. */
1217
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (t0 & 0x000002e0);
1218
}
1219

    
1220
void do_mtc0_srsctl (target_ulong t0)
1221
{
1222
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1223
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (t0 & mask);
1224
}
1225

    
1226
void do_mtc0_cause (target_ulong t0)
1227
{
1228
    uint32_t mask = 0x00C00300;
1229
    uint32_t old = env->CP0_Cause;
1230

    
1231
    if (env->insn_flags & ISA_MIPS32R2)
1232
        mask |= 1 << CP0Ca_DC;
1233

    
1234
    env->CP0_Cause = (env->CP0_Cause & ~mask) | (t0 & mask);
1235

    
1236
    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
1237
        if (env->CP0_Cause & (1 << CP0Ca_DC))
1238
            cpu_mips_stop_count(env);
1239
        else
1240
            cpu_mips_start_count(env);
1241
    }
1242

    
1243
    /* Handle the software interrupt as an hardware one, as they
1244
       are very similar */
1245
    if (t0 & CP0Ca_IP_mask) {
1246
        cpu_mips_update_irq(env);
1247
    }
1248
}
1249

    
1250
void do_mtc0_ebase (target_ulong t0)
1251
{
1252
    /* vectored interrupts not implemented */
1253
    /* Multi-CPU not implemented */
1254
    env->CP0_EBase = 0x80000000 | (t0 & 0x3FFFF000);
1255
}
1256

    
1257
void do_mtc0_config0 (target_ulong t0)
1258
{
1259
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (t0 & 0x00000007);
1260
}
1261

    
1262
void do_mtc0_config2 (target_ulong t0)
1263
{
1264
    /* tertiary/secondary caches not implemented */
1265
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1266
}
1267

    
1268
void do_mtc0_watchlo (target_ulong t0, uint32_t sel)
1269
{
1270
    /* Watch exceptions for instructions, data loads, data stores
1271
       not implemented. */
1272
    env->CP0_WatchLo[sel] = (t0 & ~0x7);
1273
}
1274

    
1275
void do_mtc0_watchhi (target_ulong t0, uint32_t sel)
1276
{
1277
    env->CP0_WatchHi[sel] = (t0 & 0x40FF0FF8);
1278
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & t0 & 0x7);
1279
}
1280

    
1281
void do_mtc0_xcontext (target_ulong t0)
1282
{
1283
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1284
    env->CP0_XContext = (env->CP0_XContext & mask) | (t0 & ~mask);
1285
}
1286

    
1287
void do_mtc0_framemask (target_ulong t0)
1288
{
1289
    env->CP0_Framemask = t0; /* XXX */
1290
}
1291

    
1292
void do_mtc0_debug (target_ulong t0)
1293
{
1294
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (t0 & 0x13300120);
1295
    if (t0 & (1 << CP0DB_DM))
1296
        env->hflags |= MIPS_HFLAG_DM;
1297
    else
1298
        env->hflags &= ~MIPS_HFLAG_DM;
1299
}
1300

    
1301
void do_mttc0_debug(target_ulong t0)
1302
{
1303
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1304
    uint32_t val = t0 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1305

    
1306
    /* XXX: Might be wrong, check with EJTAG spec. */
1307
    if (other_tc == env->current_tc)
1308
        env->active_tc.CP0_Debug_tcstatus = val;
1309
    else
1310
        env->tcs[other_tc].CP0_Debug_tcstatus = val;
1311
    env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1312
                     (t0 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1313
}
1314

    
1315
void do_mtc0_performance0 (target_ulong t0)
1316
{
1317
    env->CP0_Performance0 = t0 & 0x000007ff;
1318
}
1319

    
1320
void do_mtc0_taglo (target_ulong t0)
1321
{
1322
    env->CP0_TagLo = t0 & 0xFFFFFCF6;
1323
}
1324

    
1325
void do_mtc0_datalo (target_ulong t0)
1326
{
1327
    env->CP0_DataLo = t0; /* XXX */
1328
}
1329

    
1330
void do_mtc0_taghi (target_ulong t0)
1331
{
1332
    env->CP0_TagHi = t0; /* XXX */
1333
}
1334

    
1335
void do_mtc0_datahi (target_ulong t0)
1336
{
1337
    env->CP0_DataHi = t0; /* XXX */
1338
}
1339

    
1340
void do_mtc0_status_debug(uint32_t old, uint32_t val)
1341
{
1342
    fprintf(logfile, "Status %08x (%08x) => %08x (%08x) Cause %08x",
1343
            old, old & env->CP0_Cause & CP0Ca_IP_mask,
1344
            val, val & env->CP0_Cause & CP0Ca_IP_mask,
1345
            env->CP0_Cause);
1346
    switch (env->hflags & MIPS_HFLAG_KSU) {
1347
    case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
1348
    case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
1349
    case MIPS_HFLAG_KM: fputs("\n", logfile); break;
1350
    default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1351
    }
1352
}
1353

    
1354
void do_mtc0_status_irqraise_debug(void)
1355
{
1356
    fprintf(logfile, "Raise pending IRQs\n");
1357
}
1358
#endif /* !CONFIG_USER_ONLY */
1359

    
1360
/* MIPS MT functions */
1361
target_ulong do_mftgpr(target_ulong t0, uint32_t sel)
1362
{
1363
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1364

    
1365
    if (other_tc == env->current_tc)
1366
        return env->active_tc.gpr[sel];
1367
    else
1368
        return env->tcs[other_tc].gpr[sel];
1369
}
1370

    
1371
target_ulong do_mftlo(target_ulong t0, uint32_t sel)
1372
{
1373
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1374

    
1375
    if (other_tc == env->current_tc)
1376
        return env->active_tc.LO[sel];
1377
    else
1378
        return env->tcs[other_tc].LO[sel];
1379
}
1380

    
1381
target_ulong do_mfthi(target_ulong t0, uint32_t sel)
1382
{
1383
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1384

    
1385
    if (other_tc == env->current_tc)
1386
        return env->active_tc.HI[sel];
1387
    else
1388
        return env->tcs[other_tc].HI[sel];
1389
}
1390

    
1391
target_ulong do_mftacx(target_ulong t0, uint32_t sel)
1392
{
1393
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1394

    
1395
    if (other_tc == env->current_tc)
1396
        return env->active_tc.ACX[sel];
1397
    else
1398
        return env->tcs[other_tc].ACX[sel];
1399
}
1400

    
1401
target_ulong do_mftdsp(target_ulong t0)
1402
{
1403
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1404

    
1405
    if (other_tc == env->current_tc)
1406
        return env->active_tc.DSPControl;
1407
    else
1408
        return env->tcs[other_tc].DSPControl;
1409
}
1410

    
1411
void do_mttgpr(target_ulong t0, uint32_t sel)
1412
{
1413
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1414

    
1415
    if (other_tc == env->current_tc)
1416
        env->active_tc.gpr[sel] = t0;
1417
    else
1418
        env->tcs[other_tc].gpr[sel] = t0;
1419
}
1420

    
1421
void do_mttlo(target_ulong t0, uint32_t sel)
1422
{
1423
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1424

    
1425
    if (other_tc == env->current_tc)
1426
        env->active_tc.LO[sel] = t0;
1427
    else
1428
        env->tcs[other_tc].LO[sel] = t0;
1429
}
1430

    
1431
void do_mtthi(target_ulong t0, uint32_t sel)
1432
{
1433
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1434

    
1435
    if (other_tc == env->current_tc)
1436
        env->active_tc.HI[sel] = t0;
1437
    else
1438
        env->tcs[other_tc].HI[sel] = t0;
1439
}
1440

    
1441
void do_mttacx(target_ulong t0, uint32_t sel)
1442
{
1443
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1444

    
1445
    if (other_tc == env->current_tc)
1446
        env->active_tc.ACX[sel] = t0;
1447
    else
1448
        env->tcs[other_tc].ACX[sel] = t0;
1449
}
1450

    
1451
void do_mttdsp(target_ulong t0)
1452
{
1453
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1454

    
1455
    if (other_tc == env->current_tc)
1456
        env->active_tc.DSPControl = t0;
1457
    else
1458
        env->tcs[other_tc].DSPControl = t0;
1459
}
1460

    
1461
/* MIPS MT functions */
1462
target_ulong do_dmt(target_ulong t0)
1463
{
1464
    // TODO
1465
    t0 = 0;
1466
    // rt = t0
1467

    
1468
    return t0;
1469
}
1470

    
1471
target_ulong do_emt(target_ulong t0)
1472
{
1473
    // TODO
1474
    t0 = 0;
1475
    // rt = t0
1476

    
1477
    return t0;
1478
}
1479

    
1480
target_ulong do_dvpe(target_ulong t0)
1481
{
1482
    // TODO
1483
    t0 = 0;
1484
    // rt = t0
1485

    
1486
    return t0;
1487
}
1488

    
1489
target_ulong do_evpe(target_ulong t0)
1490
{
1491
    // TODO
1492
    t0 = 0;
1493
    // rt = t0
1494

    
1495
    return t0;
1496
}
1497

    
1498
void do_fork(target_ulong t0, target_ulong t1)
1499
{
1500
    // t0 = rt, t1 = rs
1501
    t0 = 0;
1502
    // TODO: store to TC register
1503
}
1504

    
1505
target_ulong do_yield(target_ulong t0)
1506
{
1507
    if (t0 < 0) {
1508
        /* No scheduling policy implemented. */
1509
        if (t0 != -2) {
1510
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
1511
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
1512
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1513
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
1514
                do_raise_exception(EXCP_THREAD);
1515
            }
1516
        }
1517
    } else if (t0 == 0) {
1518
        if (0 /* TODO: TC underflow */) {
1519
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1520
            do_raise_exception(EXCP_THREAD);
1521
        } else {
1522
            // TODO: Deallocate TC
1523
        }
1524
    } else if (t0 > 0) {
1525
        /* Yield qualifier inputs not implemented. */
1526
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1527
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
1528
        do_raise_exception(EXCP_THREAD);
1529
    }
1530
    return env->CP0_YQMask;
1531
}
1532

    
1533
#ifndef CONFIG_USER_ONLY
1534
/* TLB management */
1535
void cpu_mips_tlb_flush (CPUState *env, int flush_global)
1536
{
1537
    /* Flush qemu's TLB and discard all shadowed entries.  */
1538
    tlb_flush (env, flush_global);
1539
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
1540
}
1541

    
1542
static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
1543
{
1544
    /* Discard entries from env->tlb[first] onwards.  */
1545
    while (env->tlb->tlb_in_use > first) {
1546
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
1547
    }
1548
}
1549

    
1550
static void r4k_fill_tlb (int idx)
1551
{
1552
    r4k_tlb_t *tlb;
1553

    
1554
    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1555
    tlb = &env->tlb->mmu.r4k.tlb[idx];
1556
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
1557
#if defined(TARGET_MIPS64)
1558
    tlb->VPN &= env->SEGMask;
1559
#endif
1560
    tlb->ASID = env->CP0_EntryHi & 0xFF;
1561
    tlb->PageMask = env->CP0_PageMask;
1562
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
1563
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1564
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1565
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1566
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1567
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1568
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1569
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1570
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1571
}
1572

    
1573
void r4k_do_tlbwi (void)
1574
{
1575
    /* Discard cached TLB entries.  We could avoid doing this if the
1576
       tlbwi is just upgrading access permissions on the current entry;
1577
       that might be a further win.  */
1578
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
1579

    
1580
    r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb, 0);
1581
    r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);
1582
}
1583

    
1584
void r4k_do_tlbwr (void)
1585
{
1586
    int r = cpu_mips_get_random(env);
1587

    
1588
    r4k_invalidate_tlb(env, r, 1);
1589
    r4k_fill_tlb(r);
1590
}
1591

    
1592
void r4k_do_tlbp (void)
1593
{
1594
    r4k_tlb_t *tlb;
1595
    target_ulong mask;
1596
    target_ulong tag;
1597
    target_ulong VPN;
1598
    uint8_t ASID;
1599
    int i;
1600

    
1601
    ASID = env->CP0_EntryHi & 0xFF;
1602
    for (i = 0; i < env->tlb->nb_tlb; i++) {
1603
        tlb = &env->tlb->mmu.r4k.tlb[i];
1604
        /* 1k pages are not supported. */
1605
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1606
        tag = env->CP0_EntryHi & ~mask;
1607
        VPN = tlb->VPN & ~mask;
1608
        /* Check ASID, virtual page number & size */
1609
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1610
            /* TLB match */
1611
            env->CP0_Index = i;
1612
            break;
1613
        }
1614
    }
1615
    if (i == env->tlb->nb_tlb) {
1616
        /* No match.  Discard any shadow entries, if any of them match.  */
1617
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
1618
            tlb = &env->tlb->mmu.r4k.tlb[i];
1619
            /* 1k pages are not supported. */
1620
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1621
            tag = env->CP0_EntryHi & ~mask;
1622
            VPN = tlb->VPN & ~mask;
1623
            /* Check ASID, virtual page number & size */
1624
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1625
                r4k_mips_tlb_flush_extra (env, i);
1626
                break;
1627
            }
1628
        }
1629

    
1630
        env->CP0_Index |= 0x80000000;
1631
    }
1632
}
1633

    
1634
void r4k_do_tlbr (void)
1635
{
1636
    r4k_tlb_t *tlb;
1637
    uint8_t ASID;
1638

    
1639
    ASID = env->CP0_EntryHi & 0xFF;
1640
    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];
1641

    
1642
    /* If this will change the current ASID, flush qemu's TLB.  */
1643
    if (ASID != tlb->ASID)
1644
        cpu_mips_tlb_flush (env, 1);
1645

    
1646
    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
1647

    
1648
    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
1649
    env->CP0_PageMask = tlb->PageMask;
1650
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
1651
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
1652
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
1653
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
1654
}
1655

    
1656
/* Specials */
1657
target_ulong do_di (void)
1658
{
1659
    target_ulong t0 = env->CP0_Status;
1660

    
1661
    env->CP0_Status = t0 & ~(1 << CP0St_IE);
1662
    cpu_mips_update_irq(env);
1663

    
1664
    return t0;
1665
}
1666

    
1667
target_ulong do_ei (void)
1668
{
1669
    target_ulong t0 = env->CP0_Status;
1670

    
1671
    env->CP0_Status = t0 | (1 << CP0St_IE);
1672
    cpu_mips_update_irq(env);
1673

    
1674
    return t0;
1675
}
1676

    
1677
void debug_pre_eret (void)
1678
{
1679
    fprintf(logfile, "ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1680
            env->active_tc.PC, env->CP0_EPC);
1681
    if (env->CP0_Status & (1 << CP0St_ERL))
1682
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1683
    if (env->hflags & MIPS_HFLAG_DM)
1684
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1685
    fputs("\n", logfile);
1686
}
1687

    
1688
void debug_post_eret (void)
1689
{
1690
    fprintf(logfile, "  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1691
            env->active_tc.PC, env->CP0_EPC);
1692
    if (env->CP0_Status & (1 << CP0St_ERL))
1693
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1694
    if (env->hflags & MIPS_HFLAG_DM)
1695
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1696
    switch (env->hflags & MIPS_HFLAG_KSU) {
1697
    case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
1698
    case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
1699
    case MIPS_HFLAG_KM: fputs("\n", logfile); break;
1700
    default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1701
    }
1702
}
1703

    
1704
void do_eret (void)
1705
{
1706
    if (loglevel & CPU_LOG_EXEC)
1707
        debug_pre_eret();
1708
    if (env->CP0_Status & (1 << CP0St_ERL)) {
1709
        env->active_tc.PC = env->CP0_ErrorEPC;
1710
        env->CP0_Status &= ~(1 << CP0St_ERL);
1711
    } else {
1712
        env->active_tc.PC = env->CP0_EPC;
1713
        env->CP0_Status &= ~(1 << CP0St_EXL);
1714
    }
1715
    compute_hflags(env);
1716
    if (loglevel & CPU_LOG_EXEC)
1717
        debug_post_eret();
1718
    env->CP0_LLAddr = 1;
1719
}
1720

    
1721
void do_deret (void)
1722
{
1723
    if (loglevel & CPU_LOG_EXEC)
1724
        debug_pre_eret();
1725
    env->active_tc.PC = env->CP0_DEPC;
1726
    env->hflags &= MIPS_HFLAG_DM;
1727
    compute_hflags(env);
1728
    if (loglevel & CPU_LOG_EXEC)
1729
        debug_post_eret();
1730
    env->CP0_LLAddr = 1;
1731
}
1732
#endif /* !CONFIG_USER_ONLY */
1733

    
1734
target_ulong do_rdhwr_cpunum(void)
1735
{
1736
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1737
        (env->CP0_HWREna & (1 << 0)))
1738
        return env->CP0_EBase & 0x3ff;
1739
    else
1740
        do_raise_exception(EXCP_RI);
1741

    
1742
    return 0;
1743
}
1744

    
1745
target_ulong do_rdhwr_synci_step(void)
1746
{
1747
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1748
        (env->CP0_HWREna & (1 << 1)))
1749
        return env->SYNCI_Step;
1750
    else
1751
        do_raise_exception(EXCP_RI);
1752

    
1753
    return 0;
1754
}
1755

    
1756
target_ulong do_rdhwr_cc(void)
1757
{
1758
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1759
        (env->CP0_HWREna & (1 << 2)))
1760
        return env->CP0_Count;
1761
    else
1762
        do_raise_exception(EXCP_RI);
1763

    
1764
    return 0;
1765
}
1766

    
1767
target_ulong do_rdhwr_ccres(void)
1768
{
1769
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1770
        (env->CP0_HWREna & (1 << 3)))
1771
        return env->CCRes;
1772
    else
1773
        do_raise_exception(EXCP_RI);
1774

    
1775
    return 0;
1776
}
1777

    
1778
/* Bitfield operations. */
1779
target_ulong do_ext(target_ulong t1, uint32_t pos, uint32_t size)
1780
{
1781
    return (int32_t)((t1 >> pos) & ((size < 32) ? ((1 << size) - 1) : ~0));
1782
}
1783

    
1784
target_ulong do_ins(target_ulong t0, target_ulong t1, uint32_t pos, uint32_t size)
1785
{
1786
    target_ulong mask = ((size < 32) ? ((1 << size) - 1) : ~0) << pos;
1787

    
1788
    return (int32_t)((t0 & ~mask) | ((t1 << pos) & mask));
1789
}
1790

    
1791
target_ulong do_wsbh(target_ulong t1)
1792
{
1793
    return (int32_t)(((t1 << 8) & ~0x00FF00FF) | ((t1 >> 8) & 0x00FF00FF));
1794
}
1795

    
1796
#if defined(TARGET_MIPS64)
1797
target_ulong do_dext(target_ulong t1, uint32_t pos, uint32_t size)
1798
{
1799
    return (t1 >> pos) & ((size < 64) ? ((1ULL << size) - 1) : ~0ULL);
1800
}
1801

    
1802
target_ulong do_dins(target_ulong t0, target_ulong t1, uint32_t pos, uint32_t size)
1803
{
1804
    target_ulong mask = ((size < 64) ? ((1ULL << size) - 1) : ~0ULL) << pos;
1805

    
1806
    return (t0 & ~mask) | ((t1 << pos) & mask);
1807
}
1808

    
1809
target_ulong do_dsbh(target_ulong t1)
1810
{
1811
    return ((t1 << 8) & ~0x00FF00FF00FF00FFULL) | ((t1 >> 8) & 0x00FF00FF00FF00FFULL);
1812
}
1813

    
1814
target_ulong do_dshd(target_ulong t1)
1815
{
1816
    t1 = ((t1 << 16) & ~0x0000FFFF0000FFFFULL) | ((t1 >> 16) & 0x0000FFFF0000FFFFULL);
1817
    return (t1 << 32) | (t1 >> 32);
1818
}
1819
#endif
1820

    
1821
void do_pmon (int function)
1822
{
1823
    function /= 2;
1824
    switch (function) {
1825
    case 2: /* TODO: char inbyte(int waitflag); */
1826
        if (env->active_tc.gpr[4] == 0)
1827
            env->active_tc.gpr[2] = -1;
1828
        /* Fall through */
1829
    case 11: /* TODO: char inbyte (void); */
1830
        env->active_tc.gpr[2] = -1;
1831
        break;
1832
    case 3:
1833
    case 12:
1834
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
1835
        break;
1836
    case 17:
1837
        break;
1838
    case 158:
1839
        {
1840
            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
1841
            printf("%s", fmt);
1842
        }
1843
        break;
1844
    }
1845
}
1846

    
1847
void do_wait (void)
1848
{
1849
    env->halted = 1;
1850
    do_raise_exception(EXCP_HLT);
1851
}
1852

    
1853
#if !defined(CONFIG_USER_ONLY)
1854

    
1855
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
1856

    
1857
#define MMUSUFFIX _mmu
1858
#define ALIGNED_ONLY
1859

    
1860
#define SHIFT 0
1861
#include "softmmu_template.h"
1862

    
1863
#define SHIFT 1
1864
#include "softmmu_template.h"
1865

    
1866
#define SHIFT 2
1867
#include "softmmu_template.h"
1868

    
1869
#define SHIFT 3
1870
#include "softmmu_template.h"
1871

    
1872
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (retaddr);
    do_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}

void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        do_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int unused)
{
    if (is_exec)
        do_raise_exception(EXCP_IBE);
    else
        do_raise_exception(EXCP_DBE);
}
#endif /* !CONFIG_USER_ONLY */

/* Complex FPU operations which may need stack space. */

#define FLOAT_ONE32 make_float32(0x3f8 << 20)
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
#define FLOAT_TWO32 make_float32(1 << 30)
#define FLOAT_TWO64 make_float64(1ULL << 62)
#define FLOAT_QNAN32 0x7fbfffff
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
#define FLOAT_SNAN32 0x7fffffff
#define FLOAT_SNAN64 0x7fffffffffffffffULL

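/* Added commentary: FLOAT_ONE32/FLOAT_ONE64 encode 1.0 (0x3f800000 and
   0x3ff0000000000000) and FLOAT_TWO32/FLOAT_TWO64 encode 2.0.  The QNAN
   patterns are substituted for invalid arithmetic results below, and the
   SNAN patterns (all-ones payload) for out-of-range conversions. */
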
/* convert MIPS rounding mode in FCR31 to IEEE library */
unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};

#define RESTORE_ROUNDING_MODE \
    set_float_rounding_mode(ieee_rm[env->fpu->fcr31 & 3], &env->fpu->fp_status)

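/* Added commentary: CFC1/CTC1 expose FCR31 through several architectural
   views -- register 0 is FIR, 25 the condition-code register (FCCR),
   26 the exception register (FEXR) and 28 the enables register (FENR);
   the mask/shift juggling below repacks the relevant FCR31 fields. */
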
target_ulong do_cfc1 (uint32_t reg)
{
    target_ulong t0;

    switch (reg) {
    case 0:
        t0 = (int32_t)env->fpu->fcr0;
        break;
    case 25:
        t0 = ((env->fpu->fcr31 >> 24) & 0xfe) | ((env->fpu->fcr31 >> 23) & 0x1);
        break;
    case 26:
        t0 = env->fpu->fcr31 & 0x0003f07c;
        break;
    case 28:
        t0 = (env->fpu->fcr31 & 0x00000f83) | ((env->fpu->fcr31 >> 22) & 0x4);
        break;
    default:
        t0 = (int32_t)env->fpu->fcr31;
        break;
    }

    return t0;
}

void do_ctc1 (target_ulong t0, uint32_t reg)
{
    switch(reg) {
    case 25:
        if (t0 & 0xffffff00)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0x017fffff) | ((t0 & 0xfe) << 24) |
                     ((t0 & 0x1) << 23);
        break;
    case 26:
        if (t0 & 0x007c0000)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0xfffc0f83) | (t0 & 0x0003f07c);
        break;
    case 28:
        if (t0 & 0x007c0000)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0xfefff07c) | (t0 & 0x00000f83) |
                     ((t0 & 0x4) << 22);
        break;
    case 31:
        if (t0 & 0x007c0000)
            return;
        env->fpu->fcr31 = t0;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    set_float_exception_flags(0, &env->fpu->fp_status);
    if ((GET_FP_ENABLE(env->fpu->fcr31) | 0x20) & GET_FP_CAUSE(env->fpu->fcr31))
        do_raise_exception(EXCP_FPE);
}

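/* Added commentary: the two helpers below translate between the softfloat
   exception-flag bits and the MIPS FCSR layout, where the cause/enable/flag
   fields each order the exceptions as Inexact, Underflow, Overflow,
   Divide-by-zero, Invalid from the least-significant bit upward.
   update_fcr31() then latches the per-operation cause bits and either
   raises EXCP_FPE (if the exception is enabled) or accumulates the sticky
   flag bits. */
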
static inline char ieee_ex_to_mips(char xcpt)
{
    return (xcpt & float_flag_inexact) >> 5 |
           (xcpt & float_flag_underflow) >> 3 |
           (xcpt & float_flag_overflow) >> 1 |
           (xcpt & float_flag_divbyzero) << 1 |
           (xcpt & float_flag_invalid) << 4;
}

static inline char mips_ex_to_ieee(char xcpt)
{
    return (xcpt & FP_INEXACT) << 5 |
           (xcpt & FP_UNDERFLOW) << 3 |
           (xcpt & FP_OVERFLOW) << 1 |
           (xcpt & FP_DIV0) >> 1 |
           (xcpt & FP_INVALID) >> 4;
}

static inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->fpu->fp_status));

    SET_FP_CAUSE(env->fpu->fcr31, tmp);
    if (GET_FP_ENABLE(env->fpu->fcr31) & tmp)
        do_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->fpu->fcr31, tmp);
}

/* Float support.
   Single precision routines have a "s" suffix, double precision a
   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
   paired single lower "pl", paired single upper "pu".  */

/* unary operations, modifying fp status  */
uint64_t do_float_sqrt_d(uint64_t fdt0)
{
    return float64_sqrt(fdt0, &env->fpu->fp_status);
}

uint32_t do_float_sqrt_s(uint32_t fst0)
{
    return float32_sqrt(fst0, &env->fpu->fp_status);
}

uint64_t do_float_cvtd_s(uint32_t fst0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fdt2 = float32_to_float64(fst0, &env->fpu->fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t do_float_cvtd_w(uint32_t wt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fdt2 = int32_to_float64(wt0, &env->fpu->fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t do_float_cvtd_l(uint64_t dt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fdt2 = int64_to_float64(dt0, &env->fpu->fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t do_float_cvtl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    dt2 = float64_to_int64(fdt0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t do_float_cvtl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    dt2 = float32_to_int64(fst0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t do_float_cvtps_pw(uint64_t dt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->fpu->fp_status);
    fsth2 = int32_to_float32(dt0 >> 32, &env->fpu->fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t do_float_cvtpw_ps(uint64_t fdt0)
{
    uint32_t wt2;
    uint32_t wth2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->fpu->fp_status);
    wth2 = float32_to_int32(fdt0 >> 32, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        wt2 = FLOAT_SNAN32;
        wth2 = FLOAT_SNAN32;
    }
    return ((uint64_t)wth2 << 32) | wt2;
}

uint32_t do_float_cvts_d(uint64_t fdt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fst2 = float64_to_float32(fdt0, &env->fpu->fp_status);
    update_fcr31();
    return fst2;
}

uint32_t do_float_cvts_w(uint32_t wt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fst2 = int32_to_float32(wt0, &env->fpu->fp_status);
    update_fcr31();
    return fst2;
}

uint32_t do_float_cvts_l(uint64_t dt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fst2 = int64_to_float32(dt0, &env->fpu->fp_status);
    update_fcr31();
    return fst2;
}

uint32_t do_float_cvts_pl(uint32_t wt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    wt2 = wt0;
    update_fcr31();
    return wt2;
}

uint32_t do_float_cvts_pu(uint32_t wth0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    wt2 = wth0;
    update_fcr31();
    return wt2;
}

uint32_t do_float_cvtw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    wt2 = float32_to_int32(fst0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t do_float_cvtw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    wt2 = float64_to_int32(fdt0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t do_float_roundl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    dt2 = float64_to_int64(fdt0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t do_float_roundl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    dt2 = float32_to_int64(fst0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t do_float_roundw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    wt2 = float64_to_int32(fdt0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t do_float_roundw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    wt2 = float32_to_int32(fst0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t do_float_truncl_d(uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64_round_to_zero(fdt0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t do_float_truncl_s(uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64_round_to_zero(fst0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t do_float_truncw_d(uint64_t fdt0)
{
    uint32_t wt2;

    wt2 = float64_to_int32_round_to_zero(fdt0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t do_float_truncw_s(uint32_t fst0)
{
    uint32_t wt2;

    wt2 = float32_to_int32_round_to_zero(fst0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t do_float_ceill_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    dt2 = float64_to_int64(fdt0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t do_float_ceill_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    dt2 = float32_to_int64(fst0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t do_float_ceilw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    wt2 = float64_to_int32(fdt0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t do_float_ceilw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    wt2 = float32_to_int32(fst0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t do_float_floorl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    dt2 = float64_to_int64(fdt0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t do_float_floorl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    dt2 = float32_to_int64(fst0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t do_float_floorw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    wt2 = float64_to_int32(fdt0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t do_float_floorw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    wt2 = float32_to_int32(fst0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

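/* Added commentary: the ROUND/TRUNC/CEIL/FLOOR helpers above follow one
   pattern -- force the required IEEE rounding mode, convert, restore the
   mode programmed in FCR31, and if the conversion set Overflow or Invalid
   replace the result with the saturated pattern 0x7fffffff or
   0x7fffffffffffffff (spelled FLOAT_SNAN32/FLOAT_SNAN64 above). */
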
/* unary operations, not modifying fp status  */
#define FLOAT_UNOP(name)                                       \
uint64_t do_float_ ## name ## _d(uint64_t fdt0)                \
{                                                              \
    return float64_ ## name(fdt0);                             \
}                                                              \
uint32_t do_float_ ## name ## _s(uint32_t fst0)                \
{                                                              \
    return float32_ ## name(fst0);                             \
}                                                              \
uint64_t do_float_ ## name ## _ps(uint64_t fdt0)               \
{                                                              \
    uint32_t wt0;                                              \
    uint32_t wth0;                                             \
                                                               \
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                 \
    wth0 = float32_ ## name(fdt0 >> 32);                       \
    return ((uint64_t)wth0 << 32) | wt0;                       \
}
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP

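/* Added commentary: FLOAT_UNOP(abs) and FLOAT_UNOP(chs) expand to
   do_float_abs_{d,s,ps}() and do_float_chs_{d,s,ps}(), which only clear
   or flip the sign bit and therefore never touch the FP status word. */
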
/* MIPS specific unary operations */
uint64_t do_float_recip_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->fpu->fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t do_float_recip_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->fpu->fp_status);
    update_fcr31();
    return fst2;
}

uint64_t do_float_rsqrt_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fdt2 = float64_sqrt(fdt0, &env->fpu->fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->fpu->fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t do_float_rsqrt_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fst2 = float32_sqrt(fst0, &env->fpu->fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->fpu->fp_status);
    update_fcr31();
    return fst2;
}

uint64_t do_float_recip1_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->fpu->fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t do_float_recip1_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->fpu->fp_status);
    update_fcr31();
    return fst2;
}

uint64_t do_float_recip1_ps(uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->fpu->fp_status);
    fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->fpu->fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t do_float_rsqrt1_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fdt2 = float64_sqrt(fdt0, &env->fpu->fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->fpu->fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t do_float_rsqrt1_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fst2 = float32_sqrt(fst0, &env->fpu->fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->fpu->fp_status);
    update_fcr31();
    return fst2;
}

uint64_t do_float_rsqrt1_ps(uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->fpu->fp_status);
    fsth2 = float32_sqrt(fdt0 >> 32, &env->fpu->fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->fpu->fp_status);
    fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->fpu->fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

#define FLOAT_OP(name, p) void do_float_##name##_##p(void)

/* binary operations */
#define FLOAT_BINOP(name)                                          \
uint64_t do_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)     \
{                                                                  \
    uint64_t dt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    dt2 = float64_ ## name (fdt0, fdt1, &env->fpu->fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID)                \
        dt2 = FLOAT_QNAN64;                                        \
    return dt2;                                                    \
}                                                                  \
                                                                   \
uint32_t do_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)     \
{                                                                  \
    uint32_t wt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->fpu->fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID)                \
        wt2 = FLOAT_QNAN32;                                        \
    return wt2;                                                    \
}                                                                  \
                                                                   \
uint64_t do_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)    \
{                                                                  \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
    uint32_t fsth0 = fdt0 >> 32;                                   \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
    uint32_t fsth1 = fdt1 >> 32;                                   \
    uint32_t wt2;                                                  \
    uint32_t wth2;                                                 \
                                                                   \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->fpu->fp_status);     \
    wth2 = float32_ ## name (fsth0, fsth1, &env->fpu->fp_status);  \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) {              \
        wt2 = FLOAT_QNAN32;                                        \
        wth2 = FLOAT_QNAN32;                                       \
    }                                                              \
    return ((uint64_t)wth2 << 32) | wt2;                           \
}

FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP

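/* Added commentary: each FLOAT_BINOP(op) instantiation defines
   do_float_<op>_d(), do_float_<op>_s() and do_float_<op>_ps(); when the
   operation raises an Invalid cause, the result is replaced by the
   default quiet NaN pattern (FLOAT_QNAN32/FLOAT_QNAN64). */
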
/* ternary operations */
#define FLOAT_TERNOP(name1, name2)                                        \
uint64_t do_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->fpu->fp_status);          \
    return float64_ ## name2 (fdt0, fdt2, &env->fpu->fp_status);          \
}                                                                         \
                                                                          \
uint32_t do_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->fpu->fp_status);          \
    return float32_ ## name2 (fst0, fst2, &env->fpu->fp_status);          \
}                                                                         \
                                                                          \
uint64_t do_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
                                            uint64_t fdt2)                \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->fpu->fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->fpu->fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->fpu->fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->fpu->fp_status);       \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}

FLOAT_TERNOP(mul, add)
FLOAT_TERNOP(mul, sub)
#undef FLOAT_TERNOP

/* negated ternary operations */
#define FLOAT_NTERNOP(name1, name2)                                       \
uint64_t do_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->fpu->fp_status);          \
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->fpu->fp_status);          \
    return float64_chs(fdt2);                                             \
}                                                                         \
                                                                          \
uint32_t do_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->fpu->fp_status);          \
    fst2 = float32_ ## name2 (fst0, fst2, &env->fpu->fp_status);          \
    return float32_chs(fst2);                                             \
}                                                                         \
                                                                          \
uint64_t do_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
                                           uint64_t fdt2)                 \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->fpu->fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->fpu->fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->fpu->fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->fpu->fp_status);       \
    fst2 = float32_chs(fst2);                                             \
    fsth2 = float32_chs(fsth2);                                           \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}

FLOAT_NTERNOP(mul, add)
FLOAT_NTERNOP(mul, sub)
#undef FLOAT_NTERNOP

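/* Added commentary: the TERNOP/NTERNOP expansions provide the
   multiply-add family (do_float_muladd_*, do_float_nmulsub_*, ...).
   The product and the sum are rounded separately, i.e. this is not a
   fused multiply-add, and unlike the other helpers these do not call
   update_fcr31(). */
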
/* MIPS specific binary operations */
uint64_t do_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    fdt2 = float64_mul(fdt0, fdt2, &env->fpu->fp_status);
    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->fpu->fp_status));
    update_fcr31();
    return fdt2;
}

uint32_t do_float_recip2_s(uint32_t fst0, uint32_t fst2)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    fst2 = float32_mul(fst0, fst2, &env->fpu->fp_status);
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->fpu->fp_status));
    update_fcr31();
    return fst2;
}

uint64_t do_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fst2 = float32_mul(fst0, fst2, &env->fpu->fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->fpu->fp_status);
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->fpu->fp_status));
    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->fpu->fp_status));
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t do_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    fdt2 = float64_mul(fdt0, fdt2, &env->fpu->fp_status);
    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->fpu->fp_status);
    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->fpu->fp_status));
    update_fcr31();
    return fdt2;
}

uint32_t do_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    fst2 = float32_mul(fst0, fst2, &env->fpu->fp_status);
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->fpu->fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->fpu->fp_status));
    update_fcr31();
    return fst2;
}

uint64_t do_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fst2 = float32_mul(fst0, fst2, &env->fpu->fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->fpu->fp_status);
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->fpu->fp_status);
    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->fpu->fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->fpu->fp_status));
    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->fpu->fp_status));
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

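/* Added commentary: recip2 computes 1 - a*b and rsqrt2 computes
   (1 - a*b) / 2, i.e. the correction terms used in a Newton-Raphson
   refinement step for the reciprocal and reciprocal square-root
   approximations seeded by recip1/rsqrt1 above. */
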
uint64_t do_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
    uint32_t fsth1 = fdt1 >> 32;
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fst2 = float32_add (fst0, fsth0, &env->fpu->fp_status);
    fsth2 = float32_add (fst1, fsth1, &env->fpu->fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t do_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
    uint32_t fsth1 = fdt1 >> 32;
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->fpu->fp_status);
    fst2 = float32_mul (fst0, fsth0, &env->fpu->fp_status);
    fsth2 = float32_mul (fst1, fsth1, &env->fpu->fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

/* compare operations */
#define FOP_COND_D(op, cond)                                   \
void do_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                              \
    int c = cond;                                              \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->fpu);                             \
    else                                                       \
        CLEAR_FP_COND(cc, env->fpu);                           \
}                                                              \
void do_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                              \
    int c;                                                     \
    fdt0 = float64_abs(fdt0);                                  \
    fdt1 = float64_abs(fdt1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->fpu);                             \
    else                                                       \
        CLEAR_FP_COND(cc, env->fpu);                           \
}

int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
{
    if (float64_is_signaling_nan(a) ||
        float64_is_signaling_nan(b) ||
        (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float64_is_nan(a) || float64_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}

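/* Added commentary: sixteen compare predicates follow.  The first group
   (f/un/eq/ueq/olt/ult/ole/ule) only raises Invalid for signaling NaNs,
   while the second group (sf/ngle/seq/ngl/lt/nge/le/ngt) passes sig=1 so
   that any NaN operand raises Invalid; predicates whose name begins with
   "u" or "n" are also true when the operands compare unordered. */
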
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(f,   (float64_is_unordered(0, fdt1, fdt0, &env->fpu->fp_status), 0))
FOP_COND_D(un,  float64_is_unordered(0, fdt1, fdt0, &env->fpu->fp_status))
FOP_COND_D(eq,  !float64_is_unordered(0, fdt1, fdt0, &env->fpu->fp_status) && float64_eq(fdt0, fdt1, &env->fpu->fp_status))
FOP_COND_D(ueq, float64_is_unordered(0, fdt1, fdt0, &env->fpu->fp_status)  || float64_eq(fdt0, fdt1, &env->fpu->fp_status))
FOP_COND_D(olt, !float64_is_unordered(0, fdt1, fdt0, &env->fpu->fp_status) && float64_lt(fdt0, fdt1, &env->fpu->fp_status))
FOP_COND_D(ult, float64_is_unordered(0, fdt1, fdt0, &env->fpu->fp_status)  || float64_lt(fdt0, fdt1, &env->fpu->fp_status))
FOP_COND_D(ole, !float64_is_unordered(0, fdt1, fdt0, &env->fpu->fp_status) && float64_le(fdt0, fdt1, &env->fpu->fp_status))
FOP_COND_D(ule, float64_is_unordered(0, fdt1, fdt0, &env->fpu->fp_status)  || float64_le(fdt0, fdt1, &env->fpu->fp_status))
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(sf,  (float64_is_unordered(1, fdt1, fdt0, &env->fpu->fp_status), 0))
FOP_COND_D(ngle,float64_is_unordered(1, fdt1, fdt0, &env->fpu->fp_status))
FOP_COND_D(seq, !float64_is_unordered(1, fdt1, fdt0, &env->fpu->fp_status) && float64_eq(fdt0, fdt1, &env->fpu->fp_status))
FOP_COND_D(ngl, float64_is_unordered(1, fdt1, fdt0, &env->fpu->fp_status)  || float64_eq(fdt0, fdt1, &env->fpu->fp_status))
FOP_COND_D(lt,  !float64_is_unordered(1, fdt1, fdt0, &env->fpu->fp_status) && float64_lt(fdt0, fdt1, &env->fpu->fp_status))
FOP_COND_D(nge, float64_is_unordered(1, fdt1, fdt0, &env->fpu->fp_status)  || float64_lt(fdt0, fdt1, &env->fpu->fp_status))
FOP_COND_D(le,  !float64_is_unordered(1, fdt1, fdt0, &env->fpu->fp_status) && float64_le(fdt0, fdt1, &env->fpu->fp_status))
FOP_COND_D(ngt, float64_is_unordered(1, fdt1, fdt0, &env->fpu->fp_status)  || float64_le(fdt0, fdt1, &env->fpu->fp_status))

#define FOP_COND_S(op, cond)                                   \
void do_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
{                                                              \
    int c = cond;                                              \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->fpu);                             \
    else                                                       \
        CLEAR_FP_COND(cc, env->fpu);                           \
}                                                              \
void do_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
{                                                              \
    int c;                                                     \
    fst0 = float32_abs(fst0);                                  \
    fst1 = float32_abs(fst1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->fpu);                             \
    else                                                       \
        CLEAR_FP_COND(cc, env->fpu);                           \
}

flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
{
    if (float32_is_signaling_nan(a) ||
        float32_is_signaling_nan(b) ||
        (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float32_is_nan(a) || float32_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}

/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(f,   (float32_is_unordered(0, fst1, fst0, &env->fpu->fp_status), 0))
FOP_COND_S(un,  float32_is_unordered(0, fst1, fst0, &env->fpu->fp_status))
FOP_COND_S(eq,  !float32_is_unordered(0, fst1, fst0, &env->fpu->fp_status) && float32_eq(fst0, fst1, &env->fpu->fp_status))
FOP_COND_S(ueq, float32_is_unordered(0, fst1, fst0, &env->fpu->fp_status)  || float32_eq(fst0, fst1, &env->fpu->fp_status))
FOP_COND_S(olt, !float32_is_unordered(0, fst1, fst0, &env->fpu->fp_status) && float32_lt(fst0, fst1, &env->fpu->fp_status))
FOP_COND_S(ult, float32_is_unordered(0, fst1, fst0, &env->fpu->fp_status)  || float32_lt(fst0, fst1, &env->fpu->fp_status))
FOP_COND_S(ole, !float32_is_unordered(0, fst1, fst0, &env->fpu->fp_status) && float32_le(fst0, fst1, &env->fpu->fp_status))
FOP_COND_S(ule, float32_is_unordered(0, fst1, fst0, &env->fpu->fp_status)  || float32_le(fst0, fst1, &env->fpu->fp_status))
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(sf,  (float32_is_unordered(1, fst1, fst0, &env->fpu->fp_status), 0))
FOP_COND_S(ngle,float32_is_unordered(1, fst1, fst0, &env->fpu->fp_status))
FOP_COND_S(seq, !float32_is_unordered(1, fst1, fst0, &env->fpu->fp_status) && float32_eq(fst0, fst1, &env->fpu->fp_status))
FOP_COND_S(ngl, float32_is_unordered(1, fst1, fst0, &env->fpu->fp_status)  || float32_eq(fst0, fst1, &env->fpu->fp_status))
FOP_COND_S(lt,  !float32_is_unordered(1, fst1, fst0, &env->fpu->fp_status) && float32_lt(fst0, fst1, &env->fpu->fp_status))
FOP_COND_S(nge, float32_is_unordered(1, fst1, fst0, &env->fpu->fp_status)  || float32_lt(fst0, fst1, &env->fpu->fp_status))
FOP_COND_S(le,  !float32_is_unordered(1, fst1, fst0, &env->fpu->fp_status) && float32_le(fst0, fst1, &env->fpu->fp_status))
FOP_COND_S(ngt, float32_is_unordered(1, fst1, fst0, &env->fpu->fp_status)  || float32_le(fst0, fst1, &env->fpu->fp_status))

#define FOP_COND_PS(op, condl, condh)                           \
void do_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                               \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                          \
    uint32_t fsth0 = fdt0 >> 32;                                \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                          \
    uint32_t fsth1 = fdt1 >> 32;                                \
    int cl = condl;                                             \
    int ch = condh;                                             \
                                                                \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->fpu);                              \
    else                                                        \
        CLEAR_FP_COND(cc, env->fpu);                            \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->fpu);                          \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->fpu);                        \
}                                                               \
void do_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                               \
    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
    int cl = condl;                                             \
    int ch = condh;                                             \
                                                                \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->fpu);                              \
    else                                                        \
        CLEAR_FP_COND(cc, env->fpu);                            \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->fpu);                          \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->fpu);                        \
}

/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(f,   (float32_is_unordered(0, fst1, fst0, &env->fpu->fp_status), 0),
                 (float32_is_unordered(0, fsth1, fsth0, &env->fpu->fp_status), 0))
FOP_COND_PS(un,  float32_is_unordered(0, fst1, fst0, &env->fpu->fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->fpu->fp_status))
FOP_COND_PS(eq,  !float32_is_unordered(0, fst1, fst0, &env->fpu->fp_status)   && float32_eq(fst0, fst1, &env->fpu->fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->fpu->fp_status) && float32_eq(fsth0, fsth1, &env->fpu->fp_status))
FOP_COND_PS(ueq, float32_is_unordered(0, fst1, fst0, &env->fpu->fp_status)    || float32_eq(fst0, fst1, &env->fpu->fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->fpu->fp_status)  || float32_eq(fsth0, fsth1, &env->fpu->fp_status))
FOP_COND_PS(olt, !float32_is_unordered(0, fst1, fst0, &env->fpu->fp_status)   && float32_lt(fst0, fst1, &env->fpu->fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->fpu->fp_status) && float32_lt(fsth0, fsth1, &env->fpu->fp_status))
FOP_COND_PS(ult, float32_is_unordered(0, fst1, fst0, &env->fpu->fp_status)    || float32_lt(fst0, fst1, &env->fpu->fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->fpu->fp_status)  || float32_lt(fsth0, fsth1, &env->fpu->fp_status))
FOP_COND_PS(ole, !float32_is_unordered(0, fst1, fst0, &env->fpu->fp_status)   && float32_le(fst0, fst1, &env->fpu->fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->fpu->fp_status) && float32_le(fsth0, fsth1, &env->fpu->fp_status))
FOP_COND_PS(ule, float32_is_unordered(0, fst1, fst0, &env->fpu->fp_status)    || float32_le(fst0, fst1, &env->fpu->fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->fpu->fp_status)  || float32_le(fsth0, fsth1, &env->fpu->fp_status))
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(sf,  (float32_is_unordered(1, fst1, fst0, &env->fpu->fp_status), 0),
                 (float32_is_unordered(1, fsth1, fsth0, &env->fpu->fp_status), 0))
FOP_COND_PS(ngle,float32_is_unordered(1, fst1, fst0, &env->fpu->fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->fpu->fp_status))
FOP_COND_PS(seq, !float32_is_unordered(1, fst1, fst0, &env->fpu->fp_status)   && float32_eq(fst0, fst1, &env->fpu->fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->fpu->fp_status) && float32_eq(fsth0, fsth1, &env->fpu->fp_status))
FOP_COND_PS(ngl, float32_is_unordered(1, fst1, fst0, &env->fpu->fp_status)    || float32_eq(fst0, fst1, &env->fpu->fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->fpu->fp_status)  || float32_eq(fsth0, fsth1, &env->fpu->fp_status))
FOP_COND_PS(lt,  !float32_is_unordered(1, fst1, fst0, &env->fpu->fp_status)   && float32_lt(fst0, fst1, &env->fpu->fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->fpu->fp_status) && float32_lt(fsth0, fsth1, &env->fpu->fp_status))
FOP_COND_PS(nge, float32_is_unordered(1, fst1, fst0, &env->fpu->fp_status)    || float32_lt(fst0, fst1, &env->fpu->fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->fpu->fp_status)  || float32_lt(fsth0, fsth1, &env->fpu->fp_status))
FOP_COND_PS(le,  !float32_is_unordered(1, fst1, fst0, &env->fpu->fp_status)   && float32_le(fst0, fst1, &env->fpu->fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->fpu->fp_status) && float32_le(fsth0, fsth1, &env->fpu->fp_status))
FOP_COND_PS(ngt, float32_is_unordered(1, fst1, fst0, &env->fpu->fp_status)    || float32_le(fst0, fst1, &env->fpu->fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->fpu->fp_status)  || float32_le(fsth0, fsth1, &env->fpu->fp_status))