
target-mips/op_helper.c @ revision e18231a3


1
/*
2
 *  MIPS emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2004-2005 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include <stdlib.h>
21
#include "exec.h"
22

    
23
#include "host-utils.h"
24

    
25
/*****************************************************************************/
26
/* Exceptions processing helpers */
27

    
28
void do_raise_exception_err (uint32_t exception, int error_code)
29
{
30
#if 1
31
    if (logfile && exception < 0x100)
32
        fprintf(logfile, "%s: %d %d\n", __func__, exception, error_code);
33
#endif
34
    env->exception_index = exception;
35
    env->error_code = error_code;
36
    cpu_loop_exit();
37
}
38

    
39
void do_raise_exception (uint32_t exception)
40
{
41
    do_raise_exception_err(exception, 0);
42
}
43

    
44
void do_interrupt_restart (void)
45
{
46
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
47
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
48
        !(env->hflags & MIPS_HFLAG_DM) &&
49
        (env->CP0_Status & (1 << CP0St_IE)) &&
50
        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
51
        env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
52
        do_raise_exception(EXCP_EXT_INTERRUPT);
53
    }
54
}
55

    
56
void do_restore_state (void *pc_ptr)
57
{
58
    TranslationBlock *tb;
59
    unsigned long pc = (unsigned long) pc_ptr;
60
    
61
    tb = tb_find_pc (pc);
62
    if (tb) {
63
        cpu_restore_state (tb, env, pc, NULL);
64
    }
65
}
66

    
67
target_ulong do_clo (target_ulong t0)
68
{
69
    return clo32(t0);
70
}
71

    
72
target_ulong do_clz (target_ulong t0)
73
{
74
    return clz32(t0);
75
}
76

    
77
#if defined(TARGET_MIPS64)
78
target_ulong do_dclo (target_ulong t0)
79
{
80
    return clo64(t0);
81
}
82

    
83
target_ulong do_dclz (target_ulong t0)
84
{
85
    return clz64(t0);
86
}
87
#endif /* TARGET_MIPS64 */
88

    
89
/* 64-bit arithmetic for 32-bit hosts */
90
static inline uint64_t get_HILO (void)
91
{
92
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
93
}
94

    
95
static inline void set_HILO (uint64_t HILO)
96
{
97
    env->active_tc.LO[0] = (int32_t)HILO;
98
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
99
}
100

    
101
/* These have to update the caller's t0 in addition to HI/LO, so they are
   macros rather than inline functions (an assignment to a by-value
   parameter would be lost). */
#define set_HIT0_LO(t0, HILO) do {                                      \
        uint64_t HILO_ = (HILO);                                        \
        env->active_tc.LO[0] = (int32_t)(HILO_ & 0xFFFFFFFF);           \
        (t0) = env->active_tc.HI[0] = (int32_t)(HILO_ >> 32);           \
    } while (0)

#define set_HI_LOT0(t0, HILO) do {                                      \
        uint64_t HILO_ = (HILO);                                        \
        (t0) = env->active_tc.LO[0] = (int32_t)(HILO_ & 0xFFFFFFFF);    \
        env->active_tc.HI[0] = (int32_t)(HILO_ >> 32);                  \
    } while (0)
112

    
113
#if TARGET_LONG_BITS > HOST_LONG_BITS
114
void do_madd (target_ulong t0, target_ulong t1)
115
{
116
    int64_t tmp;
117

    
118
    tmp = ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);
119
    set_HILO((int64_t)get_HILO() + tmp);
120
}
121

    
122
void do_maddu (target_ulong t0, target_ulong t1)
123
{
124
    uint64_t tmp;
125

    
126
    tmp = ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1);
127
    set_HILO(get_HILO() + tmp);
128
}
129

    
130
void do_msub (target_ulong t0, target_ulong t1)
131
{
132
    int64_t tmp;
133

    
134
    tmp = ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);
135
    set_HILO((int64_t)get_HILO() - tmp);
136
}
137

    
138
void do_msubu (target_ulong t0, target_ulong t1)
139
{
140
    uint64_t tmp;
141

    
142
    tmp = ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1);
143
    set_HILO(get_HILO() - tmp);
144
}
145
#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
146

    
147
/* Multiplication variants of the vr54xx. */
148
target_ulong do_muls (target_ulong t0, target_ulong t1)
149
{
150
    set_HI_LOT0(t0, 0 - ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
151

    
152
    return t0;
153
}
154

    
155
target_ulong do_mulsu (target_ulong t0, target_ulong t1)
156
{
157
    set_HI_LOT0(t0, 0 - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
158

    
159
    return t0;
160
}
161

    
162
target_ulong do_macc (target_ulong t0, target_ulong t1)
163
{
164
    set_HI_LOT0(t0, ((int64_t)get_HILO()) + ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
165

    
166
    return t0;
167
}
168

    
169
target_ulong do_macchi (target_ulong t0, target_ulong t1)
170
{
171
    set_HIT0_LO(t0, ((int64_t)get_HILO()) + ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
172

    
173
    return t0;
174
}
175

    
176
target_ulong do_maccu (target_ulong t0, target_ulong t1)
177
{
178
    set_HI_LOT0(t0, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
179

    
180
    return t0;
181
}
182

    
183
target_ulong do_macchiu (target_ulong t0, target_ulong t1)
184
{
185
    set_HIT0_LO(t0, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
186

    
187
    return t0;
188
}
189

    
190
target_ulong do_msac (target_ulong t0, target_ulong t1)
191
{
192
    set_HI_LOT0(t0, ((int64_t)get_HILO()) - ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
193

    
194
    return t0;
195
}
196

    
197
target_ulong do_msachi (target_ulong t0, target_ulong t1)
198
{
199
    set_HIT0_LO(t0, ((int64_t)get_HILO()) - ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
200

    
201
    return t0;
202
}
203

    
204
target_ulong do_msacu (target_ulong t0, target_ulong t1)
205
{
206
    set_HI_LOT0(t0, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
207

    
208
    return t0;
209
}
210

    
211
target_ulong do_msachiu (target_ulong t0, target_ulong t1)
212
{
213
    set_HIT0_LO(t0, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
214

    
215
    return t0;
216
}
217

    
218
target_ulong do_mulhi (target_ulong t0, target_ulong t1)
219
{
220
    set_HIT0_LO(t0, (int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);
221

    
222
    return t0;
223
}
224

    
225
target_ulong do_mulhiu (target_ulong t0, target_ulong t1)
226
{
227
    set_HIT0_LO(t0, (uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1);
228

    
229
    return t0;
230
}
231

    
232
target_ulong do_mulshi (target_ulong t0, target_ulong t1)
233
{
234
    set_HIT0_LO(t0, 0 - ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
235

    
236
    return t0;
237
}
238

    
239
target_ulong do_mulshiu (target_ulong t0, target_ulong t1)
240
{
241
    set_HIT0_LO(t0, 0 - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
242

    
243
    return t0;
244
}
245

    
246
#ifdef TARGET_MIPS64
247
void do_dmult (target_ulong t0, target_ulong t1)
248
{
249
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), t0, t1);
250
}
251

    
252
void do_dmultu (target_ulong t0, target_ulong t1)
253
{
254
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), t0, t1);
255
}
256
#endif
257

    
258
#ifdef TARGET_WORDS_BIGENDIAN
259
#define GET_LMASK(v) ((v) & 3)
260
#define GET_OFFSET(addr, offset) (addr + (offset))
261
#else
262
#define GET_LMASK(v) (((v) & 3) ^ 3)
263
#define GET_OFFSET(addr, offset) (addr - (offset))
264
#endif
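/* GET_LMASK() gives the offset of the address within its aligned word
   (0..3); on little-endian targets the value is XOR-flipped so the
   helpers below can use the same comparisons regardless of endianness.
   GET_OFFSET() steps the address towards the remaining bytes of the
   word. */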
265

    
266
target_ulong do_lwl(target_ulong t0, target_ulong t1, int mem_idx)
267
{
268
    target_ulong tmp;
269

    
270
#ifdef CONFIG_USER_ONLY
271
#define ldfun ldub_raw
272
#else
273
    int (*ldfun)(target_ulong);
274

    
275
    switch (mem_idx)
276
    {
277
    case 0: ldfun = ldub_kernel; break;
278
    case 1: ldfun = ldub_super; break;
279
    default:
280
    case 2: ldfun = ldub_user; break;
281
    }
282
#endif
283
    tmp = ldfun(t0);
284
    t1 = (t1 & 0x00FFFFFF) | (tmp << 24);
285

    
286
    if (GET_LMASK(t0) <= 2) {
287
        tmp = ldfun(GET_OFFSET(t0, 1));
288
        t1 = (t1 & 0xFF00FFFF) | (tmp << 16);
289
    }
290

    
291
    if (GET_LMASK(t0) <= 1) {
292
        tmp = ldfun(GET_OFFSET(t0, 2));
293
        t1 = (t1 & 0xFFFF00FF) | (tmp << 8);
294
    }
295

    
296
    if (GET_LMASK(t0) == 0) {
297
        tmp = ldfun(GET_OFFSET(t0, 3));
298
        t1 = (t1 & 0xFFFFFF00) | tmp;
299
    }
300
    return (int32_t)t1;
301
}
302

    
303
target_ulong do_lwr(target_ulong t0, target_ulong t1, int mem_idx)
304
{
305
    target_ulong tmp;
306

    
307
#ifdef CONFIG_USER_ONLY
308
#define ldfun ldub_raw
309
#else
310
    int (*ldfun)(target_ulong);
311

    
312
    switch (mem_idx)
313
    {
314
    case 0: ldfun = ldub_kernel; break;
315
    case 1: ldfun = ldub_super; break;
316
    default:
317
    case 2: ldfun = ldub_user; break;
318
    }
319
#endif
320
    tmp = ldfun(t0);
321
    t1 = (t1 & 0xFFFFFF00) | tmp;
322

    
323
    if (GET_LMASK(t0) >= 1) {
324
        tmp = ldfun(GET_OFFSET(t0, -1));
325
        t1 = (t1 & 0xFFFF00FF) | (tmp << 8);
326
    }
327

    
328
    if (GET_LMASK(t0) >= 2) {
329
        tmp = ldfun(GET_OFFSET(t0, -2));
330
        t1 = (t1 & 0xFF00FFFF) | (tmp << 16);
331
    }
332

    
333
    if (GET_LMASK(t0) == 3) {
334
        tmp = ldfun(GET_OFFSET(t0, -3));
335
        t1 = (t1 & 0x00FFFFFF) | (tmp << 24);
336
    }
337
    return (int32_t)t1;
338
}
339

    
340
void do_swl(target_ulong t0, target_ulong t1, int mem_idx)
341
{
342
#ifdef CONFIG_USER_ONLY
343
#define stfun stb_raw
344
#else
345
    void (*stfun)(target_ulong, int);
346

    
347
    switch (mem_idx)
348
    {
349
    case 0: stfun = stb_kernel; break;
350
    case 1: stfun = stb_super; break;
351
    default:
352
    case 2: stfun = stb_user; break;
353
    }
354
#endif
355
    stfun(t0, (uint8_t)(t1 >> 24));
356

    
357
    if (GET_LMASK(t0) <= 2)
358
        stfun(GET_OFFSET(t0, 1), (uint8_t)(t1 >> 16));
359

    
360
    if (GET_LMASK(t0) <= 1)
361
        stfun(GET_OFFSET(t0, 2), (uint8_t)(t1 >> 8));
362

    
363
    if (GET_LMASK(t0) == 0)
364
        stfun(GET_OFFSET(t0, 3), (uint8_t)t1);
365
}
366

    
367
void do_swr(target_ulong t0, target_ulong t1, int mem_idx)
368
{
369
#ifdef CONFIG_USER_ONLY
370
#define stfun stb_raw
371
#else
372
    void (*stfun)(target_ulong, int);
373

    
374
    switch (mem_idx)
375
    {
376
    case 0: stfun = stb_kernel; break;
377
    case 1: stfun = stb_super; break;
378
    default:
379
    case 2: stfun = stb_user; break;
380
    }
381
#endif
382
    stfun(t0, (uint8_t)t1);
383

    
384
    if (GET_LMASK(t0) >= 1)
385
        stfun(GET_OFFSET(t0, -1), (uint8_t)(t1 >> 8));
386

    
387
    if (GET_LMASK(t0) >= 2)
388
        stfun(GET_OFFSET(t0, -2), (uint8_t)(t1 >> 16));
389

    
390
    if (GET_LMASK(t0) == 3)
391
        stfun(GET_OFFSET(t0, -3), (uint8_t)(t1 >> 24));
392
}
393

    
394
#if defined(TARGET_MIPS64)
395
/* "half" load and stores.  We must do the memory access inline,
396
   or fault handling won't work.  */
397

    
398
#ifdef TARGET_WORDS_BIGENDIAN
399
#define GET_LMASK64(v) ((v) & 7)
400
#else
401
#define GET_LMASK64(v) (((v) & 7) ^ 7)
402
#endif
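/* Same byte-lane trick as GET_LMASK above, extended to the eight bytes
   touched by LDL/LDR/SDL/SDR. */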
403

    
404
target_ulong do_ldl(target_ulong t0, target_ulong t1, int mem_idx)
405
{
406
    uint64_t tmp;
407

    
408
#ifdef CONFIG_USER_ONLY
409
#define ldfun ldub_raw
410
#else
411
    int (*ldfun)(target_ulong);
412

    
413
    switch (mem_idx)
414
    {
415
    case 0: ldfun = ldub_kernel; break;
416
    case 1: ldfun = ldub_super; break;
417
    default:
418
    case 2: ldfun = ldub_user; break;
419
    }
420
#endif
421
    tmp = ldfun(t0);
422
    t1 = (t1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
423

    
424
    if (GET_LMASK64(t0) <= 6) {
425
        tmp = ldfun(GET_OFFSET(t0, 1));
426
        t1 = (t1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
427
    }
428

    
429
    if (GET_LMASK64(t0) <= 5) {
430
        tmp = ldfun(GET_OFFSET(t0, 2));
431
        t1 = (t1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
432
    }
433

    
434
    if (GET_LMASK64(t0) <= 4) {
435
        tmp = ldfun(GET_OFFSET(t0, 3));
436
        t1 = (t1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
437
    }
438

    
439
    if (GET_LMASK64(t0) <= 3) {
440
        tmp = ldfun(GET_OFFSET(t0, 4));
441
        t1 = (t1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
442
    }
443

    
444
    if (GET_LMASK64(t0) <= 2) {
445
        tmp = ldfun(GET_OFFSET(t0, 5));
446
        t1 = (t1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
447
    }
448

    
449
    if (GET_LMASK64(t0) <= 1) {
450
        tmp = ldfun(GET_OFFSET(t0, 6));
451
        t1 = (t1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
452
    }
453

    
454
    if (GET_LMASK64(t0) == 0) {
455
        tmp = ldfun(GET_OFFSET(t0, 7));
456
        t1 = (t1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
457
    }
458

    
459
    return t1;
460
}
461

    
462
target_ulong do_ldr(target_ulong t0, target_ulong t1, int mem_idx)
463
{
464
    uint64_t tmp;
465

    
466
#ifdef CONFIG_USER_ONLY
467
#define ldfun ldub_raw
468
#else
469
    int (*ldfun)(target_ulong);
470

    
471
    switch (mem_idx)
472
    {
473
    case 0: ldfun = ldub_kernel; break;
474
    case 1: ldfun = ldub_super; break;
475
    default:
476
    case 2: ldfun = ldub_user; break;
477
    }
478
#endif
479
    tmp = ldfun(t0);
480
    t1 = (t1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
481

    
482
    if (GET_LMASK64(t0) >= 1) {
483
        tmp = ldfun(GET_OFFSET(t0, -1));
484
        t1 = (t1 & 0xFFFFFFFFFFFF00FFULL) | (tmp  << 8);
485
    }
486

    
487
    if (GET_LMASK64(t0) >= 2) {
488
        tmp = ldfun(GET_OFFSET(t0, -2));
489
        t1 = (t1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
490
    }
491

    
492
    if (GET_LMASK64(t0) >= 3) {
493
        tmp = ldfun(GET_OFFSET(t0, -3));
494
        t1 = (t1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
495
    }
496

    
497
    if (GET_LMASK64(t0) >= 4) {
498
        tmp = ldfun(GET_OFFSET(t0, -4));
499
        t1 = (t1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
500
    }
501

    
502
    if (GET_LMASK64(t0) >= 5) {
503
        tmp = ldfun(GET_OFFSET(t0, -5));
504
        t1 = (t1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
505
    }
506

    
507
    if (GET_LMASK64(t0) >= 6) {
508
        tmp = ldfun(GET_OFFSET(t0, -6));
509
        t1 = (t1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
510
    }
511

    
512
    if (GET_LMASK64(t0) == 7) {
513
        tmp = ldfun(GET_OFFSET(t0, -7));
514
        t1 = (t1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
515
    }
516

    
517
    return t1;
518
}
519

    
520
void do_sdl(target_ulong t0, target_ulong t1, int mem_idx)
521
{
522
#ifdef CONFIG_USER_ONLY
523
#define stfun stb_raw
524
#else
525
    void (*stfun)(target_ulong, int);
526

    
527
    switch (mem_idx)
528
    {
529
    case 0: stfun = stb_kernel; break;
530
    case 1: stfun = stb_super; break;
531
    default:
532
    case 2: stfun = stb_user; break;
533
    }
534
#endif
535
    stfun(t0, (uint8_t)(t1 >> 56));
536

    
537
    if (GET_LMASK64(t0) <= 6)
538
        stfun(GET_OFFSET(t0, 1), (uint8_t)(t1 >> 48));
539

    
540
    if (GET_LMASK64(t0) <= 5)
541
        stfun(GET_OFFSET(t0, 2), (uint8_t)(t1 >> 40));
542

    
543
    if (GET_LMASK64(t0) <= 4)
544
        stfun(GET_OFFSET(t0, 3), (uint8_t)(t1 >> 32));
545

    
546
    if (GET_LMASK64(t0) <= 3)
547
        stfun(GET_OFFSET(t0, 4), (uint8_t)(t1 >> 24));
548

    
549
    if (GET_LMASK64(t0) <= 2)
550
        stfun(GET_OFFSET(t0, 5), (uint8_t)(t1 >> 16));
551

    
552
    if (GET_LMASK64(t0) <= 1)
553
        stfun(GET_OFFSET(t0, 6), (uint8_t)(t1 >> 8));
554

    
555
    if (GET_LMASK64(t0) <= 0)
556
        stfun(GET_OFFSET(t0, 7), (uint8_t)t1);
557
}
558

    
559
void do_sdr(target_ulong t0, target_ulong t1, int mem_idx)
560
{
561
#ifdef CONFIG_USER_ONLY
562
#define stfun stb_raw
563
#else
564
    void (*stfun)(target_ulong, int);
565

    
566
    switch (mem_idx)
567
    {
568
    case 0: stfun = stb_kernel; break;
569
    case 1: stfun = stb_super; break;
570
     default:
571
    case 2: stfun = stb_user; break;
572
    }
573
#endif
574
    stfun(t0, (uint8_t)t1);
575

    
576
    if (GET_LMASK64(t0) >= 1)
577
        stfun(GET_OFFSET(t0, -1), (uint8_t)(t1 >> 8));
578

    
579
    if (GET_LMASK64(t0) >= 2)
580
        stfun(GET_OFFSET(t0, -2), (uint8_t)(t1 >> 16));
581

    
582
    if (GET_LMASK64(t0) >= 3)
583
        stfun(GET_OFFSET(t0, -3), (uint8_t)(t1 >> 24));
584

    
585
    if (GET_LMASK64(t0) >= 4)
586
        stfun(GET_OFFSET(t0, -4), (uint8_t)(t1 >> 32));
587

    
588
    if (GET_LMASK64(t0) >= 5)
589
        stfun(GET_OFFSET(t0, -5), (uint8_t)(t1 >> 40));
590

    
591
    if (GET_LMASK64(t0) >= 6)
592
        stfun(GET_OFFSET(t0, -6), (uint8_t)(t1 >> 48));
593

    
594
    if (GET_LMASK64(t0) == 7)
595
        stfun(GET_OFFSET(t0, -7), (uint8_t)(t1 >> 56));
596
}
597
#endif /* TARGET_MIPS64 */
598

    
599
#ifndef CONFIG_USER_ONLY
600
/* CP0 helpers */
601
target_ulong do_mfc0_mvpcontrol (void)
602
{
603
    return env->mvp->CP0_MVPControl;
604
}
605

    
606
target_ulong do_mfc0_mvpconf0 (void)
607
{
608
    return env->mvp->CP0_MVPConf0;
609
}
610

    
611
target_ulong do_mfc0_mvpconf1 (void)
612
{
613
    return env->mvp->CP0_MVPConf1;
614
}
615

    
616
target_ulong do_mfc0_random (void)
617
{
618
    return (int32_t)cpu_mips_get_random(env);
619
}
620

    
621
target_ulong do_mfc0_tcstatus (void)
622
{
623
    return env->active_tc.CP0_TCStatus;
624
}
625

    
626
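/* The mftc0/mttc0 helpers below operate on the TC selected by
   VPEControl.TargTC, which may or may not be the currently running TC. */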
target_ulong do_mftc0_tcstatus(void)
627
{
628
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
629

    
630
    if (other_tc == env->current_tc)
631
        return env->active_tc.CP0_TCStatus;
632
    else
633
        return env->tcs[other_tc].CP0_TCStatus;
634
}
635

    
636
target_ulong do_mfc0_tcbind (void)
637
{
638
    return env->active_tc.CP0_TCBind;
639
}
640

    
641
target_ulong do_mftc0_tcbind(void)
642
{
643
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
644

    
645
    if (other_tc == env->current_tc)
646
        return env->active_tc.CP0_TCBind;
647
    else
648
        return env->tcs[other_tc].CP0_TCBind;
649
}
650

    
651
target_ulong do_mfc0_tcrestart (void)
652
{
653
    return env->active_tc.PC;
654
}
655

    
656
target_ulong do_mftc0_tcrestart(void)
657
{
658
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
659

    
660
    if (other_tc == env->current_tc)
661
        return env->active_tc.PC;
662
    else
663
        return env->tcs[other_tc].PC;
664
}
665

    
666
target_ulong do_mfc0_tchalt (void)
667
{
668
    return env->active_tc.CP0_TCHalt;
669
}
670

    
671
target_ulong do_mftc0_tchalt(void)
672
{
673
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
674

    
675
    if (other_tc == env->current_tc)
676
        return env->active_tc.CP0_TCHalt;
677
    else
678
        return env->tcs[other_tc].CP0_TCHalt;
679
}
680

    
681
target_ulong do_mfc0_tccontext (void)
682
{
683
    return env->active_tc.CP0_TCContext;
684
}
685

    
686
target_ulong do_mftc0_tccontext(void)
687
{
688
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
689

    
690
    if (other_tc == env->current_tc)
691
        return env->active_tc.CP0_TCContext;
692
    else
693
        return env->tcs[other_tc].CP0_TCContext;
694
}
695

    
696
target_ulong do_mfc0_tcschedule (void)
697
{
698
    return env->active_tc.CP0_TCSchedule;
699
}
700

    
701
target_ulong do_mftc0_tcschedule(void)
702
{
703
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
704

    
705
    if (other_tc == env->current_tc)
706
        return env->active_tc.CP0_TCSchedule;
707
    else
708
        return env->tcs[other_tc].CP0_TCSchedule;
709
}
710

    
711
target_ulong do_mfc0_tcschefback (void)
712
{
713
    return env->active_tc.CP0_TCScheFBack;
714
}
715

    
716
target_ulong do_mftc0_tcschefback(void)
717
{
718
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
719

    
720
    if (other_tc == env->current_tc)
721
        return env->active_tc.CP0_TCScheFBack;
722
    else
723
        return env->tcs[other_tc].CP0_TCScheFBack;
724
}
725

    
726
target_ulong do_mfc0_count (void)
727
{
728
    return (int32_t)cpu_mips_get_count(env);
729
}
730

    
731
target_ulong do_mftc0_entryhi(void)
732
{
733
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
734
    int32_t tcstatus;
735

    
736
    if (other_tc == env->current_tc)
737
        tcstatus = env->active_tc.CP0_TCStatus;
738
    else
739
        tcstatus = env->tcs[other_tc].CP0_TCStatus;
740

    
741
    return (env->CP0_EntryHi & ~0xff) | (tcstatus & 0xff);
742
}
743

    
744
target_ulong do_mftc0_status(void)
745
{
746
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
747
    target_ulong t0;
748
    int32_t tcstatus;
749

    
750
    if (other_tc == env->current_tc)
751
        tcstatus = env->active_tc.CP0_TCStatus;
752
    else
753
        tcstatus = env->tcs[other_tc].CP0_TCStatus;
754

    
755
    t0 = env->CP0_Status & ~0xf1000018;
756
    t0 |= tcstatus & (0xf << CP0TCSt_TCU0);
757
    t0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
758
    t0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);
759

    
760
    return t0;
761
}
762

    
763
target_ulong do_mfc0_lladdr (void)
764
{
765
    return (int32_t)env->CP0_LLAddr >> 4;
766
}
767

    
768
target_ulong do_mfc0_watchlo (uint32_t sel)
769
{
770
    return (int32_t)env->CP0_WatchLo[sel];
771
}
772

    
773
target_ulong do_mfc0_watchhi (uint32_t sel)
774
{
775
    return env->CP0_WatchHi[sel];
776
}
777

    
778
target_ulong do_mfc0_debug (void)
779
{
780
    target_ulong t0 = env->CP0_Debug;
781
    if (env->hflags & MIPS_HFLAG_DM)
782
        t0 |= 1 << CP0DB_DM;
783

    
784
    return t0;
785
}
786

    
787
target_ulong do_mftc0_debug(void)
788
{
789
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
790
    int32_t tcstatus;
791

    
792
    if (other_tc == env->current_tc)
793
        tcstatus = env->active_tc.CP0_Debug_tcstatus;
794
    else
795
        tcstatus = env->tcs[other_tc].CP0_Debug_tcstatus;
796

    
797
    /* XXX: Might be wrong, check with EJTAG spec. */
798
    return (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
799
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
800
}
801

    
802
#if defined(TARGET_MIPS64)
803
target_ulong do_dmfc0_tcrestart (void)
804
{
805
    return env->active_tc.PC;
806
}
807

    
808
target_ulong do_dmfc0_tchalt (void)
809
{
810
    return env->active_tc.CP0_TCHalt;
811
}
812

    
813
target_ulong do_dmfc0_tccontext (void)
814
{
815
    return env->active_tc.CP0_TCContext;
816
}
817

    
818
target_ulong do_dmfc0_tcschedule (void)
819
{
820
    return env->active_tc.CP0_TCSchedule;
821
}
822

    
823
target_ulong do_dmfc0_tcschefback (void)
824
{
825
    return env->active_tc.CP0_TCScheFBack;
826
}
827

    
828
target_ulong do_dmfc0_lladdr (void)
829
{
830
    return env->CP0_LLAddr >> 4;
831
}
832

    
833
target_ulong do_dmfc0_watchlo (uint32_t sel)
834
{
835
    return env->CP0_WatchLo[sel];
836
}
837
#endif /* TARGET_MIPS64 */
838

    
839
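/* Writes to Index are masked to a power-of-two range large enough to
   cover all TLB entries; the probe-failure bit (bit 31) is preserved. */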
void do_mtc0_index (target_ulong t0)
840
{
841
    int num = 1;
842
    unsigned int tmp = env->tlb->nb_tlb;
843

    
844
    do {
845
        tmp >>= 1;
846
        num <<= 1;
847
    } while (tmp);
848
    env->CP0_Index = (env->CP0_Index & 0x80000000) | (t0 & (num - 1));
849
}
850

    
851
void do_mtc0_mvpcontrol (target_ulong t0)
852
{
853
    uint32_t mask = 0;
854
    uint32_t newval;
855

    
856
    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
857
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
858
                (1 << CP0MVPCo_EVP);
859
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
860
        mask |= (1 << CP0MVPCo_STLB);
861
    newval = (env->mvp->CP0_MVPControl & ~mask) | (t0 & mask);
862

    
863
    // TODO: Enable/disable shared TLB, enable/disable VPEs.
864

    
865
    env->mvp->CP0_MVPControl = newval;
866
}
867

    
868
void do_mtc0_vpecontrol (target_ulong t0)
869
{
870
    uint32_t mask;
871
    uint32_t newval;
872

    
873
    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
874
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
875
    newval = (env->CP0_VPEControl & ~mask) | (t0 & mask);
876

    
877
    /* Yield scheduler intercept not implemented. */
878
    /* Gating storage scheduler intercept not implemented. */
879

    
880
    // TODO: Enable/disable TCs.
881

    
882
    env->CP0_VPEControl = newval;
883
}
884

    
885
void do_mtc0_vpeconf0 (target_ulong t0)
886
{
887
    uint32_t mask = 0;
888
    uint32_t newval;
889

    
890
    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
891
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
892
            mask |= (0xff << CP0VPEC0_XTC);
893
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
894
    }
895
    newval = (env->CP0_VPEConf0 & ~mask) | (t0 & mask);
896

    
897
    // TODO: TC exclusive handling due to ERL/EXL.
898

    
899
    env->CP0_VPEConf0 = newval;
900
}
901

    
902
void do_mtc0_vpeconf1 (target_ulong t0)
903
{
904
    uint32_t mask = 0;
905
    uint32_t newval;
906

    
907
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
908
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
909
                (0xff << CP0VPEC1_NCP1);
910
    newval = (env->CP0_VPEConf1 & ~mask) | (t0 & mask);
911

    
912
    /* UDI not implemented. */
913
    /* CP2 not implemented. */
914

    
915
    // TODO: Handle FPU (CP1) binding.
916

    
917
    env->CP0_VPEConf1 = newval;
918
}
919

    
920
void do_mtc0_yqmask (target_ulong t0)
921
{
922
    /* Yield qualifier inputs not implemented. */
923
    env->CP0_YQMask = 0x00000000;
924
}
925

    
926
void do_mtc0_vpeopt (target_ulong t0)
927
{
928
    env->CP0_VPEOpt = t0 & 0x0000ffff;
929
}
930

    
931
void do_mtc0_entrylo0 (target_ulong t0)
932
{
933
    /* Large physaddr (PABITS) not implemented */
934
    /* 1k pages not implemented */
935
    env->CP0_EntryLo0 = t0 & 0x3FFFFFFF;
936
}
937

    
938
void do_mtc0_tcstatus (target_ulong t0)
939
{
940
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
941
    uint32_t newval;
942

    
943
    newval = (env->active_tc.CP0_TCStatus & ~mask) | (t0 & mask);
944

    
945
    // TODO: Sync with CP0_Status.
946

    
947
    env->active_tc.CP0_TCStatus = newval;
948
}
949

    
950
void do_mttc0_tcstatus (target_ulong t0)
951
{
952
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
953

    
954
    // TODO: Sync with CP0_Status.
955

    
956
    if (other_tc == env->current_tc)
957
        env->active_tc.CP0_TCStatus = t0;
958
    else
959
        env->tcs[other_tc].CP0_TCStatus = t0;
960
}
961

    
962
void do_mtc0_tcbind (target_ulong t0)
963
{
964
    uint32_t mask = (1 << CP0TCBd_TBE);
965
    uint32_t newval;
966

    
967
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
968
        mask |= (1 << CP0TCBd_CurVPE);
969
    newval = (env->active_tc.CP0_TCBind & ~mask) | (t0 & mask);
970
    env->active_tc.CP0_TCBind = newval;
971
}
972

    
973
void do_mttc0_tcbind (target_ulong t0)
974
{
975
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
976
    uint32_t mask = (1 << CP0TCBd_TBE);
977
    uint32_t newval;
978

    
979
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
980
        mask |= (1 << CP0TCBd_CurVPE);
981
    if (other_tc == env->current_tc) {
982
        newval = (env->active_tc.CP0_TCBind & ~mask) | (t0 & mask);
983
        env->active_tc.CP0_TCBind = newval;
984
    } else {
985
        newval = (env->tcs[other_tc].CP0_TCBind & ~mask) | (t0 & mask);
986
        env->tcs[other_tc].CP0_TCBind = newval;
987
    }
988
}
989

    
990
void do_mtc0_tcrestart (target_ulong t0)
991
{
992
    env->active_tc.PC = t0;
993
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
994
    env->CP0_LLAddr = 0ULL;
995
    /* MIPS16 not implemented. */
996
}
997

    
998
void do_mttc0_tcrestart (target_ulong t0)
999
{
1000
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1001

    
1002
    if (other_tc == env->current_tc) {
1003
        env->active_tc.PC = t0;
1004
        env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1005
        env->CP0_LLAddr = 0ULL;
1006
        /* MIPS16 not implemented. */
1007
    } else {
1008
        env->tcs[other_tc].PC = t0;
1009
        env->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1010
        env->CP0_LLAddr = 0ULL;
1011
        /* MIPS16 not implemented. */
1012
    }
1013
}
1014

    
1015
void do_mtc0_tchalt (target_ulong t0)
1016
{
1017
    env->active_tc.CP0_TCHalt = t0 & 0x1;
1018

    
1019
    // TODO: Halt TC / Restart (if allocated+active) TC.
1020
}
1021

    
1022
void do_mttc0_tchalt (target_ulong t0)
1023
{
1024
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1025

    
1026
    // TODO: Halt TC / Restart (if allocated+active) TC.
1027

    
1028
    if (other_tc == env->current_tc)
1029
        env->active_tc.CP0_TCHalt = t0;
1030
    else
1031
        env->tcs[other_tc].CP0_TCHalt = t0;
1032
}
1033

    
1034
void do_mtc0_tccontext (target_ulong t0)
1035
{
1036
    env->active_tc.CP0_TCContext = t0;
1037
}
1038

    
1039
void do_mttc0_tccontext (target_ulong t0)
1040
{
1041
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1042

    
1043
    if (other_tc == env->current_tc)
1044
        env->active_tc.CP0_TCContext = t0;
1045
    else
1046
        env->tcs[other_tc].CP0_TCContext = t0;
1047
}
1048

    
1049
void do_mtc0_tcschedule (target_ulong t0)
1050
{
1051
    env->active_tc.CP0_TCSchedule = t0;
1052
}
1053

    
1054
void do_mttc0_tcschedule (target_ulong t0)
1055
{
1056
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1057

    
1058
    if (other_tc == env->current_tc)
1059
        env->active_tc.CP0_TCSchedule = t0;
1060
    else
1061
        env->tcs[other_tc].CP0_TCSchedule = t0;
1062
}
1063

    
1064
void do_mtc0_tcschefback (target_ulong t0)
1065
{
1066
    env->active_tc.CP0_TCScheFBack = t0;
1067
}
1068

    
1069
void do_mttc0_tcschefback (target_ulong t0)
1070
{
1071
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1072

    
1073
    if (other_tc == env->current_tc)
1074
        env->active_tc.CP0_TCScheFBack = t0;
1075
    else
1076
        env->tcs[other_tc].CP0_TCScheFBack = t0;
1077
}
1078

    
1079
void do_mtc0_entrylo1 (target_ulong t0)
1080
{
1081
    /* Large physaddr (PABITS) not implemented */
1082
    /* 1k pages not implemented */
1083
    env->CP0_EntryLo1 = t0 & 0x3FFFFFFF;
1084
}
1085

    
1086
void do_mtc0_context (target_ulong t0)
1087
{
1088
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (t0 & ~0x007FFFFF);
1089
}
1090

    
1091
void do_mtc0_pagemask (target_ulong t0)
1092
{
1093
    /* 1k pages not implemented */
1094
    env->CP0_PageMask = t0 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
1095
}
1096

    
1097
void do_mtc0_pagegrain (target_ulong t0)
1098
{
1099
    /* SmartMIPS not implemented */
1100
    /* Large physaddr (PABITS) not implemented */
1101
    /* 1k pages not implemented */
1102
    env->CP0_PageGrain = 0;
1103
}
1104

    
1105
void do_mtc0_wired (target_ulong t0)
1106
{
1107
    env->CP0_Wired = t0 % env->tlb->nb_tlb;
1108
}
1109

    
1110
void do_mtc0_srsconf0 (target_ulong t0)
1111
{
1112
    env->CP0_SRSConf0 |= t0 & env->CP0_SRSConf0_rw_bitmask;
1113
}
1114

    
1115
void do_mtc0_srsconf1 (target_ulong t0)
1116
{
1117
    env->CP0_SRSConf1 |= t0 & env->CP0_SRSConf1_rw_bitmask;
1118
}
1119

    
1120
void do_mtc0_srsconf2 (target_ulong t0)
1121
{
1122
    env->CP0_SRSConf2 |= t0 & env->CP0_SRSConf2_rw_bitmask;
1123
}
1124

    
1125
void do_mtc0_srsconf3 (target_ulong t0)
1126
{
1127
    env->CP0_SRSConf3 |= t0 & env->CP0_SRSConf3_rw_bitmask;
1128
}
1129

    
1130
void do_mtc0_srsconf4 (target_ulong t0)
1131
{
1132
    env->CP0_SRSConf4 |= t0 & env->CP0_SRSConf4_rw_bitmask;
1133
}
1134

    
1135
void do_mtc0_hwrena (target_ulong t0)
1136
{
1137
    env->CP0_HWREna = t0 & 0x0000000F;
1138
}
1139

    
1140
void do_mtc0_count (target_ulong t0)
1141
{
1142
    cpu_mips_store_count(env, t0);
1143
}
1144

    
1145
void do_mtc0_entryhi (target_ulong t0)
1146
{
1147
    target_ulong old, val;
1148

    
1149
    /* 1k pages not implemented */
1150
    val = t0 & ((TARGET_PAGE_MASK << 1) | 0xFF);
1151
#if defined(TARGET_MIPS64)
1152
    val &= env->SEGMask;
1153
#endif
1154
    old = env->CP0_EntryHi;
1155
    env->CP0_EntryHi = val;
1156
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1157
        uint32_t tcst = env->active_tc.CP0_TCStatus & ~0xff;
1158
        env->active_tc.CP0_TCStatus = tcst | (val & 0xff);
1159
    }
1160
    /* If the ASID changes, flush qemu's TLB.  */
1161
    if ((old & 0xFF) != (val & 0xFF))
1162
        cpu_mips_tlb_flush(env, 1);
1163
}
1164

    
1165
void do_mttc0_entryhi(target_ulong t0)
1166
{
1167
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1168
    int32_t tcstatus;
1169

    
1170
    env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (t0 & ~0xff);
1171
    if (other_tc == env->current_tc) {
1172
        tcstatus = (env->active_tc.CP0_TCStatus & ~0xff) | (t0 & 0xff);
1173
        env->active_tc.CP0_TCStatus = tcstatus;
1174
    } else {
1175
        tcstatus = (env->tcs[other_tc].CP0_TCStatus & ~0xff) | (t0 & 0xff);
1176
        env->tcs[other_tc].CP0_TCStatus = tcstatus;
1177
    }
1178
}
1179

    
1180
void do_mtc0_compare (target_ulong t0)
1181
{
1182
    cpu_mips_store_compare(env, t0);
1183
}
1184

    
1185
void do_mtc0_status (target_ulong t0)
1186
{
1187
    uint32_t val, old;
1188
    uint32_t mask = env->CP0_Status_rw_bitmask;
1189

    
1190
    val = t0 & mask;
1191
    old = env->CP0_Status;
1192
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
1193
    compute_hflags(env);
1194
    if (loglevel & CPU_LOG_EXEC)
1195
        do_mtc0_status_debug(old, val);
1196
    cpu_mips_update_irq(env);
1197
}
1198

    
1199
void do_mttc0_status(target_ulong t0)
1200
{
1201
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1202
    int32_t tcstatus = env->tcs[other_tc].CP0_TCStatus;
1203

    
1204
    env->CP0_Status = t0 & ~0xf1000018;
1205
    tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (t0 & (0xf << CP0St_CU0));
1206
    tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((t0 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
1207
    tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((t0 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
1208
    if (other_tc == env->current_tc)
1209
        env->active_tc.CP0_TCStatus = tcstatus;
1210
    else
1211
        env->tcs[other_tc].CP0_TCStatus = tcstatus;
1212
}
1213

    
1214
void do_mtc0_intctl (target_ulong t0)
1215
{
1216
    /* vectored interrupts not implemented, no performance counters. */
1217
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (t0 & 0x000002e0);
1218
}
1219

    
1220
void do_mtc0_srsctl (target_ulong t0)
1221
{
1222
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1223
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (t0 & mask);
1224
}
1225

    
1226
void do_mtc0_cause (target_ulong t0)
1227
{
1228
    uint32_t mask = 0x00C00300;
1229
    uint32_t old = env->CP0_Cause;
1230

    
1231
    if (env->insn_flags & ISA_MIPS32R2)
1232
        mask |= 1 << CP0Ca_DC;
1233

    
1234
    env->CP0_Cause = (env->CP0_Cause & ~mask) | (t0 & mask);
1235

    
1236
    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
1237
        if (env->CP0_Cause & (1 << CP0Ca_DC))
1238
            cpu_mips_stop_count(env);
1239
        else
1240
            cpu_mips_start_count(env);
1241
    }
1242

    
1243
    /* Handle the software interrupt as a hardware one, as they
1244
       are very similar */
1245
    if (t0 & CP0Ca_IP_mask) {
1246
        cpu_mips_update_irq(env);
1247
    }
1248
}
1249

    
1250
void do_mtc0_ebase (target_ulong t0)
1251
{
1252
    /* vectored interrupts not implemented */
1253
    /* Multi-CPU not implemented */
1254
    env->CP0_EBase = 0x80000000 | (t0 & 0x3FFFF000);
1255
}
1256

    
1257
void do_mtc0_config0 (target_ulong t0)
1258
{
1259
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (t0 & 0x00000007);
1260
}
1261

    
1262
void do_mtc0_config2 (target_ulong t0)
1263
{
1264
    /* tertiary/secondary caches not implemented */
1265
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1266
}
1267

    
1268
void do_mtc0_watchlo (target_ulong t0, uint32_t sel)
1269
{
1270
    /* Watch exceptions for instructions, data loads, data stores
1271
       not implemented. */
1272
    env->CP0_WatchLo[sel] = (t0 & ~0x7);
1273
}
1274

    
1275
void do_mtc0_watchhi (target_ulong t0, uint32_t sel)
1276
{
1277
    env->CP0_WatchHi[sel] = (t0 & 0x40FF0FF8);
1278
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & t0 & 0x7);
1279
}
1280

    
1281
void do_mtc0_xcontext (target_ulong t0)
1282
{
1283
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1284
    env->CP0_XContext = (env->CP0_XContext & mask) | (t0 & ~mask);
1285
}
1286

    
1287
void do_mtc0_framemask (target_ulong t0)
1288
{
1289
    env->CP0_Framemask = t0; /* XXX */
1290
}
1291

    
1292
void do_mtc0_debug (target_ulong t0)
1293
{
1294
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (t0 & 0x13300120);
1295
    if (t0 & (1 << CP0DB_DM))
1296
        env->hflags |= MIPS_HFLAG_DM;
1297
    else
1298
        env->hflags &= ~MIPS_HFLAG_DM;
1299
}
1300

    
1301
void do_mttc0_debug(target_ulong t0)
1302
{
1303
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1304
    uint32_t val = t0 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1305

    
1306
    /* XXX: Might be wrong, check with EJTAG spec. */
1307
    if (other_tc == env->current_tc)
1308
        env->active_tc.CP0_Debug_tcstatus = val;
1309
    else
1310
        env->tcs[other_tc].CP0_Debug_tcstatus = val;
1311
    env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1312
                     (t0 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1313
}
1314

    
1315
void do_mtc0_performance0 (target_ulong t0)
1316
{
1317
    env->CP0_Performance0 = t0 & 0x000007ff;
1318
}
1319

    
1320
void do_mtc0_taglo (target_ulong t0)
1321
{
1322
    env->CP0_TagLo = t0 & 0xFFFFFCF6;
1323
}
1324

    
1325
void do_mtc0_datalo (target_ulong t0)
1326
{
1327
    env->CP0_DataLo = t0; /* XXX */
1328
}
1329

    
1330
void do_mtc0_taghi (target_ulong t0)
1331
{
1332
    env->CP0_TagHi = t0; /* XXX */
1333
}
1334

    
1335
void do_mtc0_datahi (target_ulong t0)
1336
{
1337
    env->CP0_DataHi = t0; /* XXX */
1338
}
1339

    
1340
void do_mtc0_status_debug(uint32_t old, uint32_t val)
1341
{
1342
    fprintf(logfile, "Status %08x (%08x) => %08x (%08x) Cause %08x",
1343
            old, old & env->CP0_Cause & CP0Ca_IP_mask,
1344
            val, val & env->CP0_Cause & CP0Ca_IP_mask,
1345
            env->CP0_Cause);
1346
    switch (env->hflags & MIPS_HFLAG_KSU) {
1347
    case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
1348
    case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
1349
    case MIPS_HFLAG_KM: fputs("\n", logfile); break;
1350
    default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1351
    }
1352
}
1353

    
1354
void do_mtc0_status_irqraise_debug(void)
1355
{
1356
    fprintf(logfile, "Raise pending IRQs\n");
1357
}
1358
#endif /* !CONFIG_USER_ONLY */
1359

    
1360
/* MIPS MT functions */
1361
target_ulong do_mftgpr(target_ulong t0, uint32_t sel)
1362
{
1363
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1364

    
1365
    if (other_tc == env->current_tc)
1366
        return env->active_tc.gpr[sel];
1367
    else
1368
        return env->tcs[other_tc].gpr[sel];
1369
}
1370

    
1371
target_ulong do_mftlo(target_ulong t0, uint32_t sel)
1372
{
1373
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1374

    
1375
    if (other_tc == env->current_tc)
1376
        return env->active_tc.LO[sel];
1377
    else
1378
        return env->tcs[other_tc].LO[sel];
1379
}
1380

    
1381
target_ulong do_mfthi(target_ulong t0, uint32_t sel)
1382
{
1383
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1384

    
1385
    if (other_tc == env->current_tc)
1386
        return env->active_tc.HI[sel];
1387
    else
1388
        return env->tcs[other_tc].HI[sel];
1389
}
1390

    
1391
target_ulong do_mftacx(target_ulong t0, uint32_t sel)
1392
{
1393
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1394

    
1395
    if (other_tc == env->current_tc)
1396
        return env->active_tc.ACX[sel];
1397
    else
1398
        return env->tcs[other_tc].ACX[sel];
1399
}
1400

    
1401
target_ulong do_mftdsp(target_ulong t0)
1402
{
1403
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1404

    
1405
    if (other_tc == env->current_tc)
1406
        return env->active_tc.DSPControl;
1407
    else
1408
        return env->tcs[other_tc].DSPControl;
1409
}
1410

    
1411
void do_mttgpr(target_ulong t0, uint32_t sel)
1412
{
1413
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1414

    
1415
    if (other_tc == env->current_tc)
1416
        env->active_tc.gpr[sel] = t0;
1417
    else
1418
        env->tcs[other_tc].gpr[sel] = t0;
1419
}
1420

    
1421
void do_mttlo(target_ulong t0, uint32_t sel)
1422
{
1423
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1424

    
1425
    if (other_tc == env->current_tc)
1426
        env->active_tc.LO[sel] = t0;
1427
    else
1428
        env->tcs[other_tc].LO[sel] = t0;
1429
}
1430

    
1431
void do_mtthi(target_ulong t0, uint32_t sel)
1432
{
1433
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1434

    
1435
    if (other_tc == env->current_tc)
1436
        env->active_tc.HI[sel] = t0;
1437
    else
1438
        env->tcs[other_tc].HI[sel] = t0;
1439
}
1440

    
1441
void do_mttacx(target_ulong t0, uint32_t sel)
1442
{
1443
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1444

    
1445
    if (other_tc == env->current_tc)
1446
        env->active_tc.ACX[sel] = t0;
1447
    else
1448
        env->tcs[other_tc].ACX[sel] = t0;
1449
}
1450

    
1451
void do_mttdsp(target_ulong t0)
1452
{
1453
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1454

    
1455
    if (other_tc == env->current_tc)
1456
        env->active_tc.DSPControl = t0;
1457
    else
1458
        env->tcs[other_tc].DSPControl = t0;
1459
}
1460

    
1461
/* MIPS MT functions */
1462
target_ulong do_dmt(target_ulong t0)
1463
{
1464
    // TODO
1465
    t0 = 0;
1466
    // rt = t0
1467

    
1468
    return t0;
1469
}
1470

    
1471
target_ulong do_emt(target_ulong t0)
1472
{
1473
    // TODO
1474
    t0 = 0;
1475
    // rt = t0
1476

    
1477
    return t0;
1478
}
1479

    
1480
target_ulong do_dvpe(target_ulong t0)
1481
{
1482
    // TODO
1483
    t0 = 0;
1484
    // rt = t0
1485

    
1486
    return t0;
1487
}
1488

    
1489
target_ulong do_evpe(target_ulong t0)
1490
{
1491
    // TODO
1492
    t0 = 0;
1493
    // rt = t0
1494

    
1495
    return t0;
1496
}
1497

    
1498
void do_fork(target_ulong t0, target_ulong t1)
1499
{
1500
    // t0 = rt, t1 = rs
1501
    t0 = 0;
1502
    // TODO: store to TC register
1503
}
1504

    
1505
target_ulong do_yield(target_ulong t0)
1506
{
1507
    if ((target_long)t0 < 0) {
1508
        /* No scheduling policy implemented. */
1509
        if (t0 != -2) {
1510
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
1511
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
1512
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1513
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
1514
                do_raise_exception(EXCP_THREAD);
1515
            }
1516
        }
1517
    } else if (t0 == 0) {
1518
        if (0 /* TODO: TC underflow */) {
1519
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1520
            do_raise_exception(EXCP_THREAD);
1521
        } else {
1522
            // TODO: Deallocate TC
1523
        }
1524
    } else if (t0 > 0) {
1525
        /* Yield qualifier inputs not implemented. */
1526
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1527
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
1528
        do_raise_exception(EXCP_THREAD);
1529
    }
1530
    return env->CP0_YQMask;
1531
}
1532

    
1533
#ifndef CONFIG_USER_ONLY
1534
/* TLB management */
1535
void cpu_mips_tlb_flush (CPUState *env, int flush_global)
1536
{
1537
    /* Flush qemu's TLB and discard all shadowed entries.  */
1538
    tlb_flush (env, flush_global);
1539
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
1540
}
1541

    
1542
static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
1543
{
1544
    /* Discard entries from env->tlb[first] onwards.  */
1545
    while (env->tlb->tlb_in_use > first) {
1546
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
1547
    }
1548
}
1549

    
1550
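/* Copy the current CP0 EntryHi/PageMask/EntryLo0/EntryLo1 contents into
   software TLB entry 'idx'.  The PFN fields are stored as physical
   addresses (the EntryLo PFN field shifted up by 12). */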
static void r4k_fill_tlb (int idx)
1551
{
1552
    r4k_tlb_t *tlb;
1553

    
1554
    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1555
    tlb = &env->tlb->mmu.r4k.tlb[idx];
1556
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
1557
#if defined(TARGET_MIPS64)
1558
    tlb->VPN &= env->SEGMask;
1559
#endif
1560
    tlb->ASID = env->CP0_EntryHi & 0xFF;
1561
    tlb->PageMask = env->CP0_PageMask;
1562
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
1563
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1564
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1565
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1566
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1567
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1568
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1569
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1570
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1571
}
1572

    
1573
void r4k_do_tlbwi (void)
1574
{
1575
    int idx;
1576

    
1577
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
1578

    
1579
    /* Discard cached TLB entries.  We could avoid doing this if the
1580
       tlbwi is just upgrading access permissions on the current entry;
1581
       that might be a further win.  */
1582
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
1583

    
1584
    r4k_invalidate_tlb(env, idx, 0);
1585
    r4k_fill_tlb(idx);
1586
}
1587

    
1588
void r4k_do_tlbwr (void)
1589
{
1590
    int r = cpu_mips_get_random(env);
1591

    
1592
    r4k_invalidate_tlb(env, r, 1);
1593
    r4k_fill_tlb(r);
1594
}
1595

    
1596
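/* TLBP: look for a TLB entry matching EntryHi.  On a hit, Index gets the
   matching entry number; on a miss, the P bit (bit 31) of Index is set. */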
void r4k_do_tlbp (void)
1597
{
1598
    r4k_tlb_t *tlb;
1599
    target_ulong mask;
1600
    target_ulong tag;
1601
    target_ulong VPN;
1602
    uint8_t ASID;
1603
    int i;
1604

    
1605
    ASID = env->CP0_EntryHi & 0xFF;
1606
    for (i = 0; i < env->tlb->nb_tlb; i++) {
1607
        tlb = &env->tlb->mmu.r4k.tlb[i];
1608
        /* 1k pages are not supported. */
1609
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1610
        tag = env->CP0_EntryHi & ~mask;
1611
        VPN = tlb->VPN & ~mask;
1612
        /* Check ASID, virtual page number & size */
1613
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1614
            /* TLB match */
1615
            env->CP0_Index = i;
1616
            break;
1617
        }
1618
    }
1619
    if (i == env->tlb->nb_tlb) {
1620
        /* No match.  Discard any shadow entries, if any of them match.  */
1621
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
1622
            tlb = &env->tlb->mmu.r4k.tlb[i];
1623
            /* 1k pages are not supported. */
1624
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1625
            tag = env->CP0_EntryHi & ~mask;
1626
            VPN = tlb->VPN & ~mask;
1627
            /* Check ASID, virtual page number & size */
1628
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1629
                r4k_mips_tlb_flush_extra (env, i);
1630
                break;
1631
            }
1632
        }
1633

    
1634
        env->CP0_Index |= 0x80000000;
1635
    }
1636
}
1637

    
1638
void r4k_do_tlbr (void)
1639
{
1640
    r4k_tlb_t *tlb;
1641
    uint8_t ASID;
1642
    int idx;
1643

    
1644
    ASID = env->CP0_EntryHi & 0xFF;
1645
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
1646
    tlb = &env->tlb->mmu.r4k.tlb[idx];
1647

    
1648
    /* If this will change the current ASID, flush qemu's TLB.  */
1649
    if (ASID != tlb->ASID)
1650
        cpu_mips_tlb_flush (env, 1);
1651

    
1652
    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
1653

    
1654
    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
1655
    env->CP0_PageMask = tlb->PageMask;
1656
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
1657
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
1658
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
1659
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
1660
}
1661

    
1662
/* Specials */
1663
target_ulong do_di (void)
1664
{
1665
    target_ulong t0 = env->CP0_Status;
1666

    
1667
    env->CP0_Status = t0 & ~(1 << CP0St_IE);
1668
    cpu_mips_update_irq(env);
1669

    
1670
    return t0;
1671
}
1672

    
1673
target_ulong do_ei (void)
1674
{
1675
    target_ulong t0 = env->CP0_Status;
1676

    
1677
    env->CP0_Status = t0 | (1 << CP0St_IE);
1678
    cpu_mips_update_irq(env);
1679

    
1680
    return t0;
1681
}
1682

    
1683
void debug_pre_eret (void)
1684
{
1685
    fprintf(logfile, "ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1686
            env->active_tc.PC, env->CP0_EPC);
1687
    if (env->CP0_Status & (1 << CP0St_ERL))
1688
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1689
    if (env->hflags & MIPS_HFLAG_DM)
1690
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1691
    fputs("\n", logfile);
1692
}
1693

    
1694
void debug_post_eret (void)
1695
{
1696
    fprintf(logfile, "  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1697
            env->active_tc.PC, env->CP0_EPC);
1698
    if (env->CP0_Status & (1 << CP0St_ERL))
1699
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1700
    if (env->hflags & MIPS_HFLAG_DM)
1701
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1702
    switch (env->hflags & MIPS_HFLAG_KSU) {
1703
    case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
1704
    case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
1705
    case MIPS_HFLAG_KM: fputs("\n", logfile); break;
1706
    default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1707
    }
1708
}
1709

    
1710
void do_eret (void)
1711
{
1712
    if (loglevel & CPU_LOG_EXEC)
1713
        debug_pre_eret();
1714
    if (env->CP0_Status & (1 << CP0St_ERL)) {
1715
        env->active_tc.PC = env->CP0_ErrorEPC;
1716
        env->CP0_Status &= ~(1 << CP0St_ERL);
1717
    } else {
1718
        env->active_tc.PC = env->CP0_EPC;
1719
        env->CP0_Status &= ~(1 << CP0St_EXL);
1720
    }
1721
    compute_hflags(env);
1722
    if (loglevel & CPU_LOG_EXEC)
1723
        debug_post_eret();
1724
    env->CP0_LLAddr = 1;
1725
}
1726

    
1727
void do_deret (void)
1728
{
1729
    if (loglevel & CPU_LOG_EXEC)
1730
        debug_pre_eret();
1731
    env->active_tc.PC = env->CP0_DEPC;
1732
    env->hflags &= ~MIPS_HFLAG_DM;    /* leave debug mode */
1733
    compute_hflags(env);
1734
    if (loglevel & CPU_LOG_EXEC)
1735
        debug_post_eret();
1736
    env->CP0_LLAddr = 1;
1737
}
1738
#endif /* !CONFIG_USER_ONLY */
1739

    
1740
target_ulong do_rdhwr_cpunum(void)
1741
{
1742
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1743
        (env->CP0_HWREna & (1 << 0)))
1744
        return env->CP0_EBase & 0x3ff;
1745
    else
1746
        do_raise_exception(EXCP_RI);
1747

    
1748
    return 0;
1749
}
1750

    
1751
target_ulong do_rdhwr_synci_step(void)
1752
{
1753
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1754
        (env->CP0_HWREna & (1 << 1)))
1755
        return env->SYNCI_Step;
1756
    else
1757
        do_raise_exception(EXCP_RI);
1758

    
1759
    return 0;
1760
}
1761

    
1762
target_ulong do_rdhwr_cc(void)
1763
{
1764
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1765
        (env->CP0_HWREna & (1 << 2)))
1766
        return env->CP0_Count;
1767
    else
1768
        do_raise_exception(EXCP_RI);
1769

    
1770
    return 0;
1771
}
1772

    
1773
target_ulong do_rdhwr_ccres(void)
1774
{
1775
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1776
        (env->CP0_HWREna & (1 << 3)))
1777
        return env->CCRes;
1778
    else
1779
        do_raise_exception(EXCP_RI);
1780

    
1781
    return 0;
1782
}
1783

    
1784
/* Bitfield operations. */
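/* EXT extracts a size-bit field starting at bit pos of rs; INS merges the
   low size bits of rs into rt at bit pos, leaving the other bits alone. */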
1785
target_ulong do_ext(target_ulong t1, uint32_t pos, uint32_t size)
1786
{
1787
    return (int32_t)((t1 >> pos) & ((size < 32) ? ((1 << size) - 1) : ~0));
1788
}
1789

    
1790
target_ulong do_ins(target_ulong t0, target_ulong t1, uint32_t pos, uint32_t size)
1791
{
1792
    target_ulong mask = ((size < 32) ? ((1 << size) - 1) : ~0) << pos;
1793

    
1794
    return (int32_t)((t0 & ~mask) | ((t1 << pos) & mask));
1795
}
1796

    
1797
target_ulong do_wsbh(target_ulong t1)
1798
{
1799
    return (int32_t)(((t1 << 8) & ~0x00FF00FF) | ((t1 >> 8) & 0x00FF00FF));
1800
}
1801

    
1802
#if defined(TARGET_MIPS64)
1803
target_ulong do_dext(target_ulong t1, uint32_t pos, uint32_t size)
1804
{
1805
    return (t1 >> pos) & ((size < 64) ? ((1ULL << size) - 1) : ~0ULL);
1806
}
1807

    
1808
target_ulong do_dins(target_ulong t0, target_ulong t1, uint32_t pos, uint32_t size)
1809
{
1810
    target_ulong mask = ((size < 64) ? ((1ULL << size) - 1) : ~0ULL) << pos;
1811

    
1812
    return (t0 & ~mask) | ((t1 << pos) & mask);
1813
}
1814

    
1815
target_ulong do_dsbh(target_ulong t1)
1816
{
1817
    return ((t1 << 8) & ~0x00FF00FF00FF00FFULL) | ((t1 >> 8) & 0x00FF00FF00FF00FFULL);
1818
}
1819

    
1820
target_ulong do_dshd(target_ulong t1)
1821
{
1822
    t1 = ((t1 << 16) & ~0x0000FFFF0000FFFFULL) | ((t1 >> 16) & 0x0000FFFF0000FFFFULL);
1823
    return (t1 << 32) | (t1 >> 32);
1824
}
1825
#endif
1826

    
1827
void do_pmon (int function)
1828
{
1829
    function /= 2;
1830
    switch (function) {
1831
    case 2: /* TODO: char inbyte(int waitflag); */
1832
        if (env->active_tc.gpr[4] == 0)
1833
            env->active_tc.gpr[2] = -1;
1834
        /* Fall through */
1835
    case 11: /* TODO: char inbyte (void); */
1836
        env->active_tc.gpr[2] = -1;
1837
        break;
1838
    case 3:
1839
    case 12:
1840
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
1841
        break;
1842
    case 17:
1843
        break;
1844
    case 158:
1845
        {
1846
            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
1847
            printf("%s", fmt);
1848
        }
1849
        break;
1850
    }
1851
}
1852

    
1853
void do_wait (void)
1854
{
1855
    env->halted = 1;
1856
    do_raise_exception(EXCP_HLT);
1857
}
1858

    
1859
#if !defined(CONFIG_USER_ONLY)
1860

    
1861
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
1862

    
1863
#define MMUSUFFIX _mmu
1864
#define ALIGNED_ONLY
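/* Instantiate the softmmu access helpers for 1-, 2-, 4- and 8-byte
   accesses.  ALIGNED_ONLY makes softmmu_template.h route misaligned
   accesses to do_unaligned_access(). */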
1865

    
1866
#define SHIFT 0
1867
#include "softmmu_template.h"
1868

    
1869
#define SHIFT 1
1870
#include "softmmu_template.h"
1871

    
1872
#define SHIFT 2
1873
#include "softmmu_template.h"
1874

    
1875
#define SHIFT 3
1876
#include "softmmu_template.h"
1877

    
1878
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
1879
{
1880
    env->CP0_BadVAddr = addr;
1881
    do_restore_state (retaddr);
1882
    do_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
1883
}
1884

    
1885
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
1886
{
1887
    TranslationBlock *tb;
1888
    CPUState *saved_env;
1889
    unsigned long pc;
1890
    int ret;
1891

    
1892
    /* XXX: hack to restore env in all cases, even if not called from
1893
       generated code */
1894
    saved_env = env;
1895
    env = cpu_single_env;
1896
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
1897
    if (ret) {
1898
        if (retaddr) {
1899
            /* now we have a real cpu fault */
1900
            pc = (unsigned long)retaddr;
1901
            tb = tb_find_pc(pc);
1902
            if (tb) {
1903
                /* the PC is inside the translated code. It means that we have
1904
                   a virtual CPU fault */
1905
                cpu_restore_state(tb, env, pc, NULL);
1906
            }
1907
        }
1908
        do_raise_exception_err(env->exception_index, env->error_code);
1909
    }
1910
    env = saved_env;
1911
}
1912

    
1913
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
1914
                          int unused, int size)
1915
{
1916
    if (is_exec)
1917
        do_raise_exception(EXCP_IBE);
1918
    else
1919
        do_raise_exception(EXCP_DBE);
1920
}
1921
#endif /* !CONFIG_USER_ONLY */
1922

    
1923
/* Complex FPU operations which may need stack space. */
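
/* Handy IEEE-754 bit patterns: FLOAT_ONE32/64 encode 1.0 and FLOAT_TWO32/64
   encode 2.0, while FLOAT_QNAN32/64 and FLOAT_SNAN32/64 are the default
   result patterns substituted below when an operation or conversion is
   invalid. */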
#define FLOAT_ONE32 make_float32(0x3f8 << 20)
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
#define FLOAT_TWO32 make_float32(1 << 30)
#define FLOAT_TWO64 make_float64(1ULL << 62)
#define FLOAT_QNAN32 0x7fbfffff
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
#define FLOAT_SNAN32 0x7fffffff
#define FLOAT_SNAN64 0x7fffffffffffffffULL

/* convert MIPS rounding mode in FCR31 to IEEE library */
unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};

#define RESTORE_ROUNDING_MODE \
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
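
/* CFC1/CTC1 access the FPU control registers through several register
   numbers: 0 reads fcr0 (FIR), 31 is the full fcr31 (FCSR), and 25, 26 and
   28 are partial views of FCSR (condition codes, exceptions, enables) that
   the bit shuffling below packs and unpacks. */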
target_ulong do_cfc1 (uint32_t reg)
{
    target_ulong t0;

    switch (reg) {
    case 0:
        t0 = (int32_t)env->active_fpu.fcr0;
        break;
    case 25:
        t0 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
        break;
    case 26:
        t0 = env->active_fpu.fcr31 & 0x0003f07c;
        break;
    case 28:
        t0 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
        break;
    default:
        t0 = (int32_t)env->active_fpu.fcr31;
        break;
    }

    return t0;
}

void do_ctc1 (target_ulong t0, uint32_t reg)
{
    switch(reg) {
    case 25:
        if (t0 & 0xffffff00)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((t0 & 0xfe) << 24) |
                     ((t0 & 0x1) << 23);
        break;
    case 26:
        if (t0 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (t0 & 0x0003f07c);
        break;
    case 28:
        if (t0 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (t0 & 0x00000f83) |
                     ((t0 & 0x4) << 22);
        break;
    case 31:
        if (t0 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = t0;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
        do_raise_exception(EXCP_FPE);
}
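
/* The softfloat exception flags and the MIPS FCSR cause/flag fields cover
   the same five exceptions (inexact, underflow, overflow, divide-by-zero,
   invalid) but in opposite bit orders, hence the per-bit shifts in the two
   translation helpers below. */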
static inline char ieee_ex_to_mips(char xcpt)
{
    return (xcpt & float_flag_inexact) >> 5 |
           (xcpt & float_flag_underflow) >> 3 |
           (xcpt & float_flag_overflow) >> 1 |
           (xcpt & float_flag_divbyzero) << 1 |
           (xcpt & float_flag_invalid) << 4;
}

static inline char mips_ex_to_ieee(char xcpt)
{
    return (xcpt & FP_INEXACT) << 5 |
           (xcpt & FP_UNDERFLOW) << 3 |
           (xcpt & FP_OVERFLOW) << 1 |
           (xcpt & FP_DIV0) >> 1 |
           (xcpt & FP_INVALID) >> 4;
}
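
/* Fold the accumulated softfloat exceptions into FCSR: the cause bits are
   always rewritten, an enabled exception raises EXCP_FPE, and otherwise the
   sticky flag bits accumulate. */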
static inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));

    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
        do_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
}

/* Float support.
   Single precision routines have a "s" suffix, double precision a
   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
   paired single lower "pl", paired single upper "pu".  */

/* unary operations, modifying fp status  */
uint64_t do_float_sqrt_d(uint64_t fdt0)
{
    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
}

uint32_t do_float_sqrt_s(uint32_t fst0)
{
    return float32_sqrt(fst0, &env->active_fpu.fp_status);
}

uint64_t do_float_cvtd_s(uint32_t fst0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t do_float_cvtd_w(uint32_t wt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t do_float_cvtd_l(uint64_t dt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}
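
/* For float-to-integer conversions MIPS specifies the largest positive
   integer (2^31-1 or 2^63-1) as the default result when the operation
   overflows or is invalid and no trap is taken; FLOAT_SNAN32/64 happen to
   be exactly those bit patterns, which is why they are used as the
   fallback below. */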
uint64_t do_float_cvtl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t do_float_cvtl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t do_float_cvtps_pw(uint64_t dt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t do_float_cvtpw_ps(uint64_t fdt0)
{
    uint32_t wt2;
    uint32_t wth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        wt2 = FLOAT_SNAN32;
        wth2 = FLOAT_SNAN32;
    }
    return ((uint64_t)wth2 << 32) | wt2;
}

uint32_t do_float_cvts_d(uint64_t fdt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t do_float_cvts_w(uint32_t wt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t do_float_cvts_l(uint64_t dt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t do_float_cvts_pl(uint32_t wt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wt0;
    update_fcr31();
    return wt2;
}

uint32_t do_float_cvts_pu(uint32_t wth0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wth0;
    update_fcr31();
    return wt2;
}

uint32_t do_float_cvtw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t do_float_cvtw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t do_float_roundl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t do_float_roundl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t do_float_roundw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t do_float_roundw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t do_float_truncl_d(uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t do_float_truncl_s(uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t do_float_truncw_d(uint64_t fdt0)
{
    uint32_t wt2;

    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t do_float_truncw_s(uint32_t fst0)
{
    uint32_t wt2;

    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t do_float_ceill_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t do_float_ceill_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t do_float_ceilw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t do_float_ceilw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t do_float_floorl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t do_float_floorl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t do_float_floorw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t do_float_floorw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

/* unary operations, not modifying fp status  */
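/* abs and chs only clear or flip the IEEE sign bit, so they raise no
   exception flags and leave fcr31 untouched; the _ps variants apply the
   same operation to both 32-bit halves of the paired-single operand. */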
#define FLOAT_UNOP(name)                                       \
uint64_t do_float_ ## name ## _d(uint64_t fdt0)                \
{                                                              \
    return float64_ ## name(fdt0);                             \
}                                                              \
uint32_t do_float_ ## name ## _s(uint32_t fst0)                \
{                                                              \
    return float32_ ## name(fst0);                             \
}                                                              \
uint64_t do_float_ ## name ## _ps(uint64_t fdt0)               \
{                                                              \
    uint32_t wt0;                                              \
    uint32_t wth0;                                             \
                                                               \
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                 \
    wth0 = float32_ ## name(fdt0 >> 32);                       \
    return ((uint64_t)wth0 << 32) | wt0;                       \
}
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP

/* MIPS specific unary operations */
uint64_t do_float_recip_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t do_float_recip_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t do_float_rsqrt_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t do_float_rsqrt_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t do_float_recip1_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t do_float_recip1_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t do_float_recip1_ps(uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t do_float_rsqrt1_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t do_float_rsqrt1_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t do_float_rsqrt1_ps(uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

#define FLOAT_OP(name, p) void do_float_##name##_##p(void)

/* binary operations */
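/* FLOAT_BINOP expands to the .d, .s and .ps helpers for one arithmetic
   operation: each clears the softfloat flags, performs the operation,
   folds the result into FCSR via update_fcr31(), and substitutes the
   default quiet NaN when the operation was invalid. */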
#define FLOAT_BINOP(name)                                          \
uint64_t do_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)     \
{                                                                  \
    uint64_t dt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
        dt2 = FLOAT_QNAN64;                                        \
    return dt2;                                                    \
}                                                                  \
                                                                   \
uint32_t do_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)     \
{                                                                  \
    uint32_t wt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
        wt2 = FLOAT_QNAN32;                                        \
    return wt2;                                                    \
}                                                                  \
                                                                   \
uint64_t do_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)    \
{                                                                  \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
    uint32_t fsth0 = fdt0 >> 32;                                   \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
    uint32_t fsth1 = fdt1 >> 32;                                   \
    uint32_t wt2;                                                  \
    uint32_t wth2;                                                 \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);  \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {              \
        wt2 = FLOAT_QNAN32;                                        \
        wth2 = FLOAT_QNAN32;                                       \
    }                                                              \
    return ((uint64_t)wth2 << 32) | wt2;                           \
}

FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP

/* ternary operations */
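/* FLOAT_TERNOP(mul, add) and FLOAT_TERNOP(mul, sub) expand to the
   MADD/MSUB-style helpers as two separately rounded steps (multiply, then
   add or subtract); the FLOAT_NTERNOP variants below additionally negate
   the final result. */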
#define FLOAT_TERNOP(name1, name2)                                        \
uint64_t do_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
}                                                                         \
                                                                          \
uint32_t do_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
}                                                                         \
                                                                          \
uint64_t do_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
                                            uint64_t fdt2)                \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}

FLOAT_TERNOP(mul, add)
FLOAT_TERNOP(mul, sub)
#undef FLOAT_TERNOP

/* negated ternary operations */
#define FLOAT_NTERNOP(name1, name2)                                       \
uint64_t do_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
    return float64_chs(fdt2);                                             \
}                                                                         \
                                                                          \
uint32_t do_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    return float32_chs(fst2);                                             \
}                                                                         \
                                                                          \
uint64_t do_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
                                           uint64_t fdt2)                 \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
    fst2 = float32_chs(fst2);                                             \
    fsth2 = float32_chs(fsth2);                                           \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}

FLOAT_NTERNOP(mul, add)
FLOAT_NTERNOP(mul, sub)
#undef FLOAT_NTERNOP

/* MIPS specific binary operations */
uint64_t do_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
    update_fcr31();
    return fdt2;
}

uint32_t do_float_recip2_s(uint32_t fst0, uint32_t fst2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
    update_fcr31();
    return fst2;
}

uint64_t do_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t do_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
    update_fcr31();
    return fdt2;
}

uint32_t do_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
    update_fcr31();
    return fst2;
}

uint64_t do_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t do_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
    uint32_t fsth1 = fdt1 >> 32;
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
    fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t do_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
    uint32_t fsth1 = fdt1 >> 32;
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
    fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

/* compare operations */
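/* Each FOP_COND_*(op, cond) expansion defines do_cmp_<fmt>_<op> and
   do_cmpabs_<fmt>_<op>: the condition is evaluated, FCSR is updated, and
   condition code cc (cc and cc+1 for paired single) is set or cleared
   accordingly; the abs variants compare magnitudes. */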
#define FOP_COND_D(op, cond)                                   \
void do_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                              \
    int c = cond;                                              \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void do_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                              \
    int c;                                                     \
    fdt0 = float64_abs(fdt0);                                  \
    fdt1 = float64_abs(fdt1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}
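
/* The "sig" argument selects signaling comparisons: with sig set, any NaN
   operand raises the invalid exception; with sig clear, only a signaling
   NaN does.  Either way a NaN operand makes the pair unordered. */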
int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
{
    if (float64_is_signaling_nan(a) ||
        float64_is_signaling_nan(b) ||
        (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float64_is_nan(a) || float64_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(f,   (float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(un,  float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(eq,  !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ueq, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(olt, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ult, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ole, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ule, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(sf,  (float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(ngle,float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(seq, !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngl, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(lt,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(nge, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(le,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngt, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))

#define FOP_COND_S(op, cond)                                   \
void do_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
{                                                              \
    int c = cond;                                              \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void do_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
{                                                              \
    int c;                                                     \
    fst0 = float32_abs(fst0);                                  \
    fst1 = float32_abs(fst1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
{
    if (float32_is_signaling_nan(a) ||
        float32_is_signaling_nan(b) ||
        (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float32_is_nan(a) || float32_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))

#define FOP_COND_PS(op, condl, condh)                           \
void do_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                               \
    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
    int cl = condl;                                             \
    int ch = condh;                                             \
                                                                \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}                                                               \
void do_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                               \
    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
    int cl = condl;                                             \
    int ch = condh;                                             \
                                                                \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))