Statistics
| Branch: | Revision:

root / target-mips / op_helper.c @ 93fcfe39

History | View | Annotate | Download (88.4 kB)

1
/*
2
 *  MIPS emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2004-2005 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19
 */
20
#include <stdlib.h>
21
#include "exec.h"
22

    
23
#include "host-utils.h"
24

    
25
#include "helper.h"
26
/*****************************************************************************/
27
/* Exceptions processing helpers */
28

    
29
/* Raise a CPU exception carrying an error code: record both in the CPU
   state and longjmp back to the main execution loop.  Does not return. */
void do_raise_exception_err (uint32_t exception, int error_code)
{
#if 1
    /* Debug aid: log "real" exceptions (< 0x100); internal QEMU
       pseudo-exceptions are not logged. */
    if (exception < 0x100)
        qemu_log("%s: %d %d\n", __func__, exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}
39

    
40
/* Convenience wrapper: raise an exception with error code 0. */
void do_raise_exception (uint32_t exception)
{
    do_raise_exception_err(exception, 0);
}
44

    
45
void do_interrupt_restart (void)
46
{
47
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
48
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
49
        !(env->hflags & MIPS_HFLAG_DM) &&
50
        (env->CP0_Status & (1 << CP0St_IE)) &&
51
        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
52
        env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
53
        do_raise_exception(EXCP_EXT_INTERRUPT);
54
    }
55
}
56

    
57
#if !defined(CONFIG_USER_ONLY)
58
/* Roll the CPU state back to the guest instruction that contains host
   pc, so that a fault raised from within a helper is reported with a
   precise guest PC. */
static void do_restore_state (void *pc_ptr)
{
    TranslationBlock *tb;
    unsigned long pc = (unsigned long) pc_ptr;

    tb = tb_find_pc (pc);
    if (tb) {
        cpu_restore_state (tb, env, pc, NULL);
    }
}
68
#endif
69

    
70
/* CLO: count leading one bits in the low 32 bits of t0. */
target_ulong do_clo (target_ulong t0)
{
    return clo32(t0);
}

/* CLZ: count leading zero bits in the low 32 bits of t0. */
target_ulong do_clz (target_ulong t0)
{
    return clz32(t0);
}
79

    
80
#if defined(TARGET_MIPS64)
81
/* DCLO: count leading one bits in all 64 bits of t0. */
target_ulong do_dclo (target_ulong t0)
{
    return clo64(t0);
}

/* DCLZ: count leading zero bits in all 64 bits of t0. */
target_ulong do_dclz (target_ulong t0)
{
    return clz64(t0);
}
90
#endif /* TARGET_MIPS64 */
91

    
92
/* 64 bits arithmetic for 32 bits hosts */
93
/* Read the HI:LO accumulator pair of the active TC as one 64-bit value
   (HI[0] in the upper 32 bits, LO[0] in the lower 32 bits). */
static inline uint64_t get_HILO (void)
{
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
}

/* Split a 64-bit value back into HI:LO, sign-extending each 32-bit
   half into the (possibly 64-bit) registers. */
static inline void set_HILO (uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)HILO;
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}
103

    
104
/* Store a 64-bit result into HI:LO and hand the sign-extended HI half
 * back to the caller through t0.
 *
 * NOTE(review): this must be a macro, not a static inline function.  The
 * previous inline took t0 by value, so "t0 = ..." was a dead store and
 * callers such as do_mulhi()/do_macchi() returned their unmodified input
 * instead of HI.  As a macro (same call syntax) the assignment reaches
 * the caller's variable again.  HILO is captured in a local so the
 * argument expression is evaluated exactly once.
 */
#define set_HIT0_LO(t0, HILO)                                         \
    do {                                                              \
        uint64_t hilo_ = (HILO);                                      \
        env->active_tc.LO[0] = (int32_t)(hilo_ & 0xFFFFFFFF);         \
        (t0) = env->active_tc.HI[0] = (int32_t)(hilo_ >> 32);         \
    } while (0)
109

    
110
/* Store a 64-bit result into HI:LO and hand the sign-extended LO half
 * back to the caller through t0.
 *
 * NOTE(review): macro for the same reason as set_HIT0_LO — as a static
 * inline taking t0 by value, the assignment to t0 never reached callers
 * (do_muls() etc. then returned their input instead of LO).  HILO is
 * captured in a local so the argument expression is evaluated once.
 */
#define set_HI_LOT0(t0, HILO)                                         \
    do {                                                              \
        uint64_t hilo_ = (HILO);                                      \
        (t0) = env->active_tc.LO[0] = (int32_t)(hilo_ & 0xFFFFFFFF);  \
        env->active_tc.HI[0] = (int32_t)(hilo_ >> 32);                \
    } while (0)
115

    
116
#if TARGET_LONG_BITS > HOST_LONG_BITS
117
/* MADD: HI:LO += signed 32x32->64 product of t0 and t1. */
void do_madd (target_ulong t0, target_ulong t1)
{
    int64_t tmp;

    tmp = ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);
    set_HILO((int64_t)get_HILO() + tmp);
}

/* MADDU: HI:LO += unsigned 32x32->64 product of t0 and t1. */
void do_maddu (target_ulong t0, target_ulong t1)
{
    uint64_t tmp;

    tmp = ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1);
    set_HILO(get_HILO() + tmp);
}

/* MSUB: HI:LO -= signed 32x32->64 product of t0 and t1. */
void do_msub (target_ulong t0, target_ulong t1)
{
    int64_t tmp;

    tmp = ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);
    set_HILO((int64_t)get_HILO() - tmp);
}

/* MSUBU: HI:LO -= unsigned 32x32->64 product of t0 and t1. */
void do_msubu (target_ulong t0, target_ulong t1)
{
    uint64_t tmp;

    tmp = ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1);
    set_HILO(get_HILO() - tmp);
}
148
#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
149

    
150
/* Multiplication variants of the vr54xx. */
151
/* MULS: HI:LO = -(t0 * t1), signed; returns LO. */
target_ulong do_muls (target_ulong t0, target_ulong t1)
{
    set_HI_LOT0(t0, 0 - ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));

    return t0;
}

/* MULSU: HI:LO = -(t0 * t1), unsigned; returns LO. */
target_ulong do_mulsu (target_ulong t0, target_ulong t1)
{
    set_HI_LOT0(t0, 0 - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));

    return t0;
}

/* MACC: HI:LO += t0 * t1, signed; returns LO. */
target_ulong do_macc (target_ulong t0, target_ulong t1)
{
    set_HI_LOT0(t0, ((int64_t)get_HILO()) + ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));

    return t0;
}

/* MACCHI: HI:LO += t0 * t1, signed; returns HI. */
target_ulong do_macchi (target_ulong t0, target_ulong t1)
{
    set_HIT0_LO(t0, ((int64_t)get_HILO()) + ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));

    return t0;
}

/* MACCU: HI:LO += t0 * t1, unsigned; returns LO. */
target_ulong do_maccu (target_ulong t0, target_ulong t1)
{
    set_HI_LOT0(t0, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));

    return t0;
}

/* MACCHIU: HI:LO += t0 * t1, unsigned; returns HI. */
target_ulong do_macchiu (target_ulong t0, target_ulong t1)
{
    set_HIT0_LO(t0, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));

    return t0;
}

/* MSAC: HI:LO -= t0 * t1, signed; returns LO. */
target_ulong do_msac (target_ulong t0, target_ulong t1)
{
    set_HI_LOT0(t0, ((int64_t)get_HILO()) - ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));

    return t0;
}

/* MSACHI: HI:LO -= t0 * t1, signed; returns HI. */
target_ulong do_msachi (target_ulong t0, target_ulong t1)
{
    set_HIT0_LO(t0, ((int64_t)get_HILO()) - ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));

    return t0;
}

/* MSACU: HI:LO -= t0 * t1, unsigned; returns LO. */
target_ulong do_msacu (target_ulong t0, target_ulong t1)
{
    set_HI_LOT0(t0, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));

    return t0;
}

/* MSACHIU: HI:LO -= t0 * t1, unsigned; returns HI. */
target_ulong do_msachiu (target_ulong t0, target_ulong t1)
{
    set_HIT0_LO(t0, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));

    return t0;
}

/* MULHI: HI:LO = t0 * t1, signed; returns HI. */
target_ulong do_mulhi (target_ulong t0, target_ulong t1)
{
    set_HIT0_LO(t0, (int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);

    return t0;
}

/* MULHIU: HI:LO = t0 * t1, unsigned; returns HI. */
target_ulong do_mulhiu (target_ulong t0, target_ulong t1)
{
    set_HIT0_LO(t0, (uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1);

    return t0;
}

/* MULSHI: HI:LO = -(t0 * t1), signed; returns HI. */
target_ulong do_mulshi (target_ulong t0, target_ulong t1)
{
    set_HIT0_LO(t0, 0 - ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));

    return t0;
}

/* MULSHIU: HI:LO = -(t0 * t1), unsigned; returns HI. */
target_ulong do_mulshiu (target_ulong t0, target_ulong t1)
{
    set_HIT0_LO(t0, 0 - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));

    return t0;
}
248

    
249
#ifdef TARGET_MIPS64
250
/* DMULT: HI:LO = signed 64x64->128 product of t0 and t1. */
void do_dmult (target_ulong t0, target_ulong t1)
{
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), t0, t1);
}

/* DMULTU: HI:LO = unsigned 64x64->128 product of t0 and t1. */
void do_dmultu (target_ulong t0, target_ulong t1)
{
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), t0, t1);
}
259
#endif
260

    
261
/* GET_LMASK(v): distance (in bytes) of address v from the "left"
   (most-significant) end of its aligned word — 0 means v addresses the
   MSB.  GET_OFFSET steps one byte towards the LSB, which is +1 on
   big-endian targets and -1 on little-endian ones. */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif
268

    
269
/* LWL (load word left): merge the bytes from address t0 up to the next
   word boundary into the high-order end of t1, byte by byte so that a
   fault on any byte is taken precisely.  Returns t1 sign-extended. */
target_ulong do_lwl(target_ulong t0, target_ulong t1, int mem_idx)
{
    target_ulong tmp;

#ifdef CONFIG_USER_ONLY
#define ldfun ldub_raw
#else
    int (*ldfun)(target_ulong);

    /* Pick the byte loader matching the privilege level encoded in
       mem_idx (0 = kernel, 1 = supervisor, 2/other = user). */
    switch (mem_idx)
    {
    case 0: ldfun = ldub_kernel; break;
    case 1: ldfun = ldub_super; break;
    default:
    case 2: ldfun = ldub_user; break;
    }
#endif
    /* First byte always loads into bits 31..24. */
    tmp = ldfun(t0);
    t1 = (t1 & 0x00FFFFFF) | (tmp << 24);

    /* Each further byte is loaded only while t0 has not yet reached the
       word boundary (GET_LMASK gives the distance from the MSB end). */
    if (GET_LMASK(t0) <= 2) {
        tmp = ldfun(GET_OFFSET(t0, 1));
        t1 = (t1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(t0) <= 1) {
        tmp = ldfun(GET_OFFSET(t0, 2));
        t1 = (t1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(t0) == 0) {
        tmp = ldfun(GET_OFFSET(t0, 3));
        t1 = (t1 & 0xFFFFFF00) | tmp;
    }
    return (int32_t)t1;
}
305

    
306
/* LWR (load word right): merge the bytes from address t0 down to the
   previous word boundary into the low-order end of t1, one byte at a
   time for precise faults.  Returns t1 sign-extended. */
target_ulong do_lwr(target_ulong t0, target_ulong t1, int mem_idx)
{
    target_ulong tmp;

#ifdef CONFIG_USER_ONLY
#define ldfun ldub_raw
#else
    int (*ldfun)(target_ulong);

    /* Byte loader for the privilege level in mem_idx. */
    switch (mem_idx)
    {
    case 0: ldfun = ldub_kernel; break;
    case 1: ldfun = ldub_super; break;
    default:
    case 2: ldfun = ldub_user; break;
    }
#endif
    /* First byte always loads into bits 7..0. */
    tmp = ldfun(t0);
    t1 = (t1 & 0xFFFFFF00) | tmp;

    /* Load additional bytes while t0 is past the start of its word. */
    if (GET_LMASK(t0) >= 1) {
        tmp = ldfun(GET_OFFSET(t0, -1));
        t1 = (t1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(t0) >= 2) {
        tmp = ldfun(GET_OFFSET(t0, -2));
        t1 = (t1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(t0) == 3) {
        tmp = ldfun(GET_OFFSET(t0, -3));
        t1 = (t1 & 0x00FFFFFF) | (tmp << 24);
    }
    return (int32_t)t1;
}
342

    
343
/* SWL (store word left): store the high-order bytes of t1 to address t0
 * up to the next word boundary, one byte at a time so a fault on any
 * byte is taken precisely.
 */
void do_swl(target_ulong t0, target_ulong t1, int mem_idx)
{
    int i;
#ifdef CONFIG_USER_ONLY
#define stfun stb_raw
#else
    void (*stfun)(target_ulong, int);

    /* Byte store routine for the privilege level in mem_idx
       (0 = kernel, 1 = supervisor, 2/other = user). */
    switch (mem_idx)
    {
    case 0: stfun = stb_kernel; break;
    case 1: stfun = stb_super; break;
    default:
    case 2: stfun = stb_user; break;
    }
#endif
    /* 4 - GET_LMASK(t0) bytes remain up to the word boundary; store
       them MSB-first, stepping towards the LSB end of the word. */
    for (i = 0; i < 4 - (int)GET_LMASK(t0); i++)
        stfun(GET_OFFSET(t0, i), (uint8_t)(t1 >> (24 - i * 8)));
}
369

    
370
/* SWR (store word right): store the low-order bytes of t1 from address
 * t0 down to the previous word boundary, byte by byte for precise
 * faults.
 */
void do_swr(target_ulong t0, target_ulong t1, int mem_idx)
{
    int i;
#ifdef CONFIG_USER_ONLY
#define stfun stb_raw
#else
    void (*stfun)(target_ulong, int);

    /* Byte store routine for the privilege level in mem_idx. */
    switch (mem_idx)
    {
    case 0: stfun = stb_kernel; break;
    case 1: stfun = stb_super; break;
    default:
    case 2: stfun = stb_user; break;
    }
#endif
    /* GET_LMASK(t0) + 1 bytes lie between t0 and the word boundary;
       store them LSB-first, stepping back towards the MSB end. */
    for (i = 0; i <= (int)GET_LMASK(t0); i++)
        stfun(GET_OFFSET(t0, -i), (uint8_t)(t1 >> (i * 8)));
}
396

    
397
#if defined(TARGET_MIPS64)
398
/* "half" load and stores.  We must do the memory access inline,
399
   or fault handling won't work.  */
400

    
401
/* GET_LMASK64(v): byte distance of address v from the most-significant
   end of its aligned doubleword (0 = v addresses the MSB). */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif
406

    
407
/* LDL (load doubleword left): merge the bytes from address t0 up to the
   next doubleword boundary into the high-order end of t1, one byte at a
   time so a fault on any byte is taken precisely. */
target_ulong do_ldl(target_ulong t0, target_ulong t1, int mem_idx)
{
    uint64_t tmp;

#ifdef CONFIG_USER_ONLY
#define ldfun ldub_raw
#else
    int (*ldfun)(target_ulong);

    /* Byte loader for the privilege level in mem_idx
       (0 = kernel, 1 = supervisor, 2/other = user). */
    switch (mem_idx)
    {
    case 0: ldfun = ldub_kernel; break;
    case 1: ldfun = ldub_super; break;
    default:
    case 2: ldfun = ldub_user; break;
    }
#endif
    /* First byte always loads into bits 63..56; each later byte is
       loaded only while t0 has not reached the doubleword boundary. */
    tmp = ldfun(t0);
    t1 = (t1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);

    if (GET_LMASK64(t0) <= 6) {
        tmp = ldfun(GET_OFFSET(t0, 1));
        t1 = (t1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(t0) <= 5) {
        tmp = ldfun(GET_OFFSET(t0, 2));
        t1 = (t1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(t0) <= 4) {
        tmp = ldfun(GET_OFFSET(t0, 3));
        t1 = (t1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(t0) <= 3) {
        tmp = ldfun(GET_OFFSET(t0, 4));
        t1 = (t1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(t0) <= 2) {
        tmp = ldfun(GET_OFFSET(t0, 5));
        t1 = (t1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(t0) <= 1) {
        tmp = ldfun(GET_OFFSET(t0, 6));
        t1 = (t1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    }

    if (GET_LMASK64(t0) == 0) {
        tmp = ldfun(GET_OFFSET(t0, 7));
        t1 = (t1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
    }

    return t1;
}
464

    
465
/* LDR (load doubleword right): merge the bytes from address t0 down to
   the previous doubleword boundary into the low-order end of t1, one
   byte at a time for precise faults. */
target_ulong do_ldr(target_ulong t0, target_ulong t1, int mem_idx)
{
    uint64_t tmp;

#ifdef CONFIG_USER_ONLY
#define ldfun ldub_raw
#else
    int (*ldfun)(target_ulong);

    /* Byte loader for the privilege level in mem_idx. */
    switch (mem_idx)
    {
    case 0: ldfun = ldub_kernel; break;
    case 1: ldfun = ldub_super; break;
    default:
    case 2: ldfun = ldub_user; break;
    }
#endif
    /* First byte always loads into bits 7..0; each later byte is loaded
       only while t0 is past the start of its doubleword. */
    tmp = ldfun(t0);
    t1 = (t1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;

    if (GET_LMASK64(t0) >= 1) {
        tmp = ldfun(GET_OFFSET(t0, -1));
        t1 = (t1 & 0xFFFFFFFFFFFF00FFULL) | (tmp  << 8);
    }

    if (GET_LMASK64(t0) >= 2) {
        tmp = ldfun(GET_OFFSET(t0, -2));
        t1 = (t1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(t0) >= 3) {
        tmp = ldfun(GET_OFFSET(t0, -3));
        t1 = (t1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(t0) >= 4) {
        tmp = ldfun(GET_OFFSET(t0, -4));
        t1 = (t1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(t0) >= 5) {
        tmp = ldfun(GET_OFFSET(t0, -5));
        t1 = (t1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(t0) >= 6) {
        tmp = ldfun(GET_OFFSET(t0, -6));
        t1 = (t1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(t0) == 7) {
        tmp = ldfun(GET_OFFSET(t0, -7));
        t1 = (t1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
    }

    return t1;
}
522

    
523
/* SDL (store doubleword left): store the high-order bytes of t1 to
 * address t0 up to the next doubleword boundary, one byte at a time so
 * a fault on any byte is taken precisely.
 */
void do_sdl(target_ulong t0, target_ulong t1, int mem_idx)
{
    int i;
#ifdef CONFIG_USER_ONLY
#define stfun stb_raw
#else
    void (*stfun)(target_ulong, int);

    /* Byte store routine for the privilege level in mem_idx
       (0 = kernel, 1 = supervisor, 2/other = user). */
    switch (mem_idx)
    {
    case 0: stfun = stb_kernel; break;
    case 1: stfun = stb_super; break;
    default:
    case 2: stfun = stb_user; break;
    }
#endif
    /* 8 - GET_LMASK64(t0) bytes remain up to the doubleword boundary;
       store them MSB-first, stepping towards the LSB end. */
    for (i = 0; i < 8 - (int)GET_LMASK64(t0); i++)
        stfun(GET_OFFSET(t0, i), (uint8_t)(t1 >> (56 - i * 8)));
}
561

    
562
/* SDR (store doubleword right): store the low-order bytes of t1 from
 * address t0 down to the previous doubleword boundary, byte by byte for
 * precise faults.
 */
void do_sdr(target_ulong t0, target_ulong t1, int mem_idx)
{
    int i;
#ifdef CONFIG_USER_ONLY
#define stfun stb_raw
#else
    void (*stfun)(target_ulong, int);

    /* Byte store routine for the privilege level in mem_idx. */
    switch (mem_idx)
    {
    case 0: stfun = stb_kernel; break;
    case 1: stfun = stb_super; break;
    default:
    case 2: stfun = stb_user; break;
    }
#endif
    /* GET_LMASK64(t0) + 1 bytes lie between t0 and the doubleword
       boundary; store them LSB-first, stepping back towards the MSB. */
    for (i = 0; i <= (int)GET_LMASK64(t0); i++)
        stfun(GET_OFFSET(t0, -i), (uint8_t)(t1 >> (i * 8)));
}
600
#endif /* TARGET_MIPS64 */
601

    
602
#ifndef CONFIG_USER_ONLY
603
/* CP0 helpers */
604
/* CP0 MVPControl read (shared per-core MT state). */
target_ulong do_mfc0_mvpcontrol (void)
{
    return env->mvp->CP0_MVPControl;
}

/* CP0 MVPConf0 read. */
target_ulong do_mfc0_mvpconf0 (void)
{
    return env->mvp->CP0_MVPConf0;
}

/* CP0 MVPConf1 read. */
target_ulong do_mfc0_mvpconf1 (void)
{
    return env->mvp->CP0_MVPConf1;
}

/* CP0 Random read, sign-extended to target width. */
target_ulong do_mfc0_random (void)
{
    return (int32_t)cpu_mips_get_random(env);
}
623

    
624
target_ulong do_mfc0_tcstatus (void)
625
{
626
    return env->active_tc.CP0_TCStatus;
627
}
628

    
629
target_ulong do_mftc0_tcstatus(void)
630
{
631
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
632

    
633
    if (other_tc == env->current_tc)
634
        return env->active_tc.CP0_TCStatus;
635
    else
636
        return env->tcs[other_tc].CP0_TCStatus;
637
}
638

    
639
target_ulong do_mfc0_tcbind (void)
640
{
641
    return env->active_tc.CP0_TCBind;
642
}
643

    
644
target_ulong do_mftc0_tcbind(void)
645
{
646
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
647

    
648
    if (other_tc == env->current_tc)
649
        return env->active_tc.CP0_TCBind;
650
    else
651
        return env->tcs[other_tc].CP0_TCBind;
652
}
653

    
654
target_ulong do_mfc0_tcrestart (void)
655
{
656
    return env->active_tc.PC;
657
}
658

    
659
target_ulong do_mftc0_tcrestart(void)
660
{
661
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
662

    
663
    if (other_tc == env->current_tc)
664
        return env->active_tc.PC;
665
    else
666
        return env->tcs[other_tc].PC;
667
}
668

    
669
target_ulong do_mfc0_tchalt (void)
670
{
671
    return env->active_tc.CP0_TCHalt;
672
}
673

    
674
target_ulong do_mftc0_tchalt(void)
675
{
676
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
677

    
678
    if (other_tc == env->current_tc)
679
        return env->active_tc.CP0_TCHalt;
680
    else
681
        return env->tcs[other_tc].CP0_TCHalt;
682
}
683

    
684
target_ulong do_mfc0_tccontext (void)
685
{
686
    return env->active_tc.CP0_TCContext;
687
}
688

    
689
target_ulong do_mftc0_tccontext(void)
690
{
691
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
692

    
693
    if (other_tc == env->current_tc)
694
        return env->active_tc.CP0_TCContext;
695
    else
696
        return env->tcs[other_tc].CP0_TCContext;
697
}
698

    
699
target_ulong do_mfc0_tcschedule (void)
700
{
701
    return env->active_tc.CP0_TCSchedule;
702
}
703

    
704
target_ulong do_mftc0_tcschedule(void)
705
{
706
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
707

    
708
    if (other_tc == env->current_tc)
709
        return env->active_tc.CP0_TCSchedule;
710
    else
711
        return env->tcs[other_tc].CP0_TCSchedule;
712
}
713

    
714
target_ulong do_mfc0_tcschefback (void)
715
{
716
    return env->active_tc.CP0_TCScheFBack;
717
}
718

    
719
target_ulong do_mftc0_tcschefback(void)
720
{
721
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
722

    
723
    if (other_tc == env->current_tc)
724
        return env->active_tc.CP0_TCScheFBack;
725
    else
726
        return env->tcs[other_tc].CP0_TCScheFBack;
727
}
728

    
729
/* CP0 Count read: current timer value, sign-extended to target width. */
target_ulong do_mfc0_count (void)
{
    return (int32_t)cpu_mips_get_count(env);
}
733

    
734
target_ulong do_mftc0_entryhi(void)
735
{
736
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
737
    int32_t tcstatus;
738

    
739
    if (other_tc == env->current_tc)
740
        tcstatus = env->active_tc.CP0_TCStatus;
741
    else
742
        tcstatus = env->tcs[other_tc].CP0_TCStatus;
743

    
744
    return (env->CP0_EntryHi & ~0xff) | (tcstatus & 0xff);
745
}
746

    
747
/* Read Status as seen by the TC selected by VPEControl.TargTC: the
   shared Status bits plus the per-TC TCU0-3, TMX and TKSU fields taken
   from that TC's TCStatus. */
target_ulong do_mftc0_status(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    target_ulong t0;
    int32_t tcstatus;

    if (other_tc == env->current_tc)
        tcstatus = env->active_tc.CP0_TCStatus;
    else
        tcstatus = env->tcs[other_tc].CP0_TCStatus;

    /* 0xf1000018 clears the CU0-3 (31..28), MX (24) and KSU (4..3)
       fields, which are refilled from the TC below. */
    t0 = env->CP0_Status & ~0xf1000018;
    t0 |= tcstatus & (0xf << CP0TCSt_TCU0);
    t0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
    t0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);

    return t0;
}
765

    
766
/* CP0 LLAddr read: the LL physical address, reported shifted right by 4
   as the architecture specifies, sign-extended to target width. */
target_ulong do_mfc0_lladdr (void)
{
    return (int32_t)env->CP0_LLAddr >> 4;
}

/* CP0 WatchLo[sel] read, sign-extended. */
target_ulong do_mfc0_watchlo (uint32_t sel)
{
    return (int32_t)env->CP0_WatchLo[sel];
}

/* CP0 WatchHi[sel] read. */
target_ulong do_mfc0_watchhi (uint32_t sel)
{
    return env->CP0_WatchHi[sel];
}
780

    
781
target_ulong do_mfc0_debug (void)
782
{
783
    target_ulong t0 = env->CP0_Debug;
784
    if (env->hflags & MIPS_HFLAG_DM)
785
        t0 |= 1 << CP0DB_DM;
786

    
787
    return t0;
788
}
789

    
790
target_ulong do_mftc0_debug(void)
791
{
792
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
793
    int32_t tcstatus;
794

    
795
    if (other_tc == env->current_tc)
796
        tcstatus = env->active_tc.CP0_Debug_tcstatus;
797
    else
798
        tcstatus = env->tcs[other_tc].CP0_Debug_tcstatus;
799

    
800
    /* XXX: Might be wrong, check with EJTAG spec. */
801
    return (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
802
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
803
}
804

    
805
#if defined(TARGET_MIPS64)
806
/* DMFC0 (64-bit) readers: as the MFC0 variants above but without
   32-bit sign-extension of the value. */
target_ulong do_dmfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

target_ulong do_dmfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong do_dmfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong do_dmfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong do_dmfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

/* LLAddr is architecturally reported shifted right by 4. */
target_ulong do_dmfc0_lladdr (void)
{
    return env->CP0_LLAddr >> 4;
}

target_ulong do_dmfc0_watchlo (uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}
840
#endif /* TARGET_MIPS64 */
841

    
842
/* CP0 Index write: preserve the probe-failure bit (31) and mask the new
   index to the smallest power of two that covers nb_tlb entries. */
void do_mtc0_index (target_ulong t0)
{
    int num = 1;
    unsigned int tmp = env->tlb->nb_tlb;

    /* After this loop num is a power of two with num > nb_tlb, so
       num - 1 is a mask wide enough for any valid index. */
    do {
        tmp >>= 1;
        num <<= 1;
    } while (tmp);
    env->CP0_Index = (env->CP0_Index & 0x80000000) | (t0 & (num - 1));
}
853

    
854
/* CP0 MVPControl write: only bits writable in the current configuration
   are updated (CPA/VPC/EVP require master-VP privilege, STLB requires
   the core to be in configuration state). */
void do_mtc0_mvpcontrol (target_ulong t0)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
                (1 << CP0MVPCo_EVP);
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0MVPCo_STLB);
    newval = (env->mvp->CP0_MVPControl & ~mask) | (t0 & mask);

    // TODO: Enable/disable shared TLB, enable/disable VPEs.

    env->mvp->CP0_MVPControl = newval;
}
870

    
871
/* CP0 VPEControl write: YSI, GSI, TE and TargTC are the writable
   fields; the scheduler intercepts are accepted but not acted upon. */
void do_mtc0_vpecontrol (target_ulong t0)
{
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (env->CP0_VPEControl & ~mask) | (t0 & mask);

    /* Yield scheduler intercept not implemented. */
    /* Gating storage scheduler intercept not implemented. */

    // TODO: Enable/disable TCs.

    env->CP0_VPEControl = newval;
}
887

    
888
/* CP0 VPEConf0 write: MVP/VPA (and XTC when the VPE is activated) are
   writable only from a master VP. */
void do_mtc0_vpeconf0 (target_ulong t0)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
            mask |= (0xff << CP0VPEC0_XTC);
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    }
    newval = (env->CP0_VPEConf0 & ~mask) | (t0 & mask);

    // TODO: TC exclusive handling due to ERL/EXL.

    env->CP0_VPEConf0 = newval;
}
904

    
905
/* CP0 VPEConf1 write: the coprocessor/CorExtend allocation fields are
   writable only while the core is in configuration state (MVPControl.VPC). */
void do_mtc0_vpeconf1 (target_ulong t0)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
                (0xff << CP0VPEC1_NCP1);
    newval = (env->CP0_VPEConf1 & ~mask) | (t0 & mask);

    /* UDI not implemented. */
    /* CP2 not implemented. */

    // TODO: Handle FPU (CP1) binding.

    env->CP0_VPEConf1 = newval;
}
922

    
923
/* CP0 YQMask write: no yield qualifier inputs are modelled, so the
   register always reads back as zero. */
void do_mtc0_yqmask (target_ulong t0)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}

/* CP0 VPEOpt write: only the low 16 bits are writable here. */
void do_mtc0_vpeopt (target_ulong t0)
{
    env->CP0_VPEOpt = t0 & 0x0000ffff;
}
933

    
934
/* CP0 EntryLo0 write: keep PFN and the C/D/V/G flag bits (30 bits). */
void do_mtc0_entrylo0 (target_ulong t0)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo0 = t0 & 0x3FFFFFFF;
}
940

    
941
/* CP0 TCStatus write for the current TC, restricted to the CPU model's
   writable-bit mask. */
void do_mtc0_tcstatus (target_ulong t0)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->active_tc.CP0_TCStatus & ~mask) | (t0 & mask);

    // TODO: Sync with CP0_Status.

    env->active_tc.CP0_TCStatus = newval;
}
952

    
953
/* MTTC0: write TCStatus of the TC selected by VPEControl.TargTC.  Note
 * that, unlike the plain MTC0 path, no writable-bit mask is applied.
 */
void do_mttc0_tcstatus (target_ulong t0)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    // TODO: Sync with CP0_Status.

    /* The active TC lives in env->active_tc, all others in env->tcs[]. */
    *((other_tc == env->current_tc) ?
      &env->active_tc.CP0_TCStatus : &env->tcs[other_tc].CP0_TCStatus) = t0;
}
964

    
965
/* CP0 TCBind write for the current TC: TBE is always writable, CurVPE
   only while the core is in configuration state. */
void do_mtc0_tcbind (target_ulong t0)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    newval = (env->active_tc.CP0_TCBind & ~mask) | (t0 & mask);
    env->active_tc.CP0_TCBind = newval;
}
975

    
976
/* MTTC0: read-modify-write TCBind of the TC selected by
 * VPEControl.TargTC, under the same writable-bit mask as MTC0.
 */
void do_mttc0_tcbind (target_ulong t0)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t cur;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    /* Select the targetted TC once for the read and once for the write;
       the active TC lives in env->active_tc, all others in env->tcs[]. */
    cur = (other_tc == env->current_tc) ?
        env->active_tc.CP0_TCBind : env->tcs[other_tc].CP0_TCBind;
    *((other_tc == env->current_tc) ?
      &env->active_tc.CP0_TCBind : &env->tcs[other_tc].CP0_TCBind) =
        (cur & ~mask) | (t0 & mask);
}
992

    
993
/* CP0 TCRestart write: set the current TC's restart PC, clear its
   dirty-state bit and drop any pending LL reservation. */
void do_mtc0_tcrestart (target_ulong t0)
{
    env->active_tc.PC = t0;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->CP0_LLAddr = 0ULL;
    /* MIPS16 not implemented. */
}
1000

    
1001
/* MTTC0: set the restart PC of the TC selected by VPEControl.TargTC,
 * clear that TC's dirty-state bit and drop the LL reservation.
 */
void do_mttc0_tcrestart (target_ulong t0)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int cur = (other_tc == env->current_tc);

    /* Both branches of the original did the same three updates; pick
       the target TC's fields once and share the code. */
    *(cur ? &env->active_tc.PC : &env->tcs[other_tc].PC) = t0;
    *(cur ? &env->active_tc.CP0_TCStatus : &env->tcs[other_tc].CP0_TCStatus)
        &= ~(1 << CP0TCSt_TDS);
    env->CP0_LLAddr = 0ULL;
    /* MIPS16 not implemented. */
}
1017

    
1018
/* CP0 TCHalt write for the current TC: only bit 0 (H) is writable. */
void do_mtc0_tchalt (target_ulong t0)
{
    env->active_tc.CP0_TCHalt = t0 & 0x1;

    // TODO: Halt TC / Restart (if allocated+active) TC.
}
1024

    
1025
/* MTTC0: write TCHalt of the TC selected by VPEControl.TargTC.  Note
 * that, unlike the plain MTC0 path, the value is not masked to bit 0.
 */
void do_mttc0_tchalt (target_ulong t0)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    // TODO: Halt TC / Restart (if allocated+active) TC.

    *((other_tc == env->current_tc) ?
      &env->active_tc.CP0_TCHalt : &env->tcs[other_tc].CP0_TCHalt) = t0;
}
1036

    
1037
/* CP0 TCContext write for the current TC. */
void do_mtc0_tccontext (target_ulong t0)
{
    env->active_tc.CP0_TCContext = t0;
}

/* MTTC0: write TCContext of the TC selected by VPEControl.TargTC. */
void do_mttc0_tccontext (target_ulong t0)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    *((other_tc == env->current_tc) ?
      &env->active_tc.CP0_TCContext : &env->tcs[other_tc].CP0_TCContext) = t0;
}
1051

    
1052
/* CP0 TCSchedule write for the current TC. */
void do_mtc0_tcschedule (target_ulong t0)
{
    env->active_tc.CP0_TCSchedule = t0;
}

/* MTTC0: write TCSchedule of the TC selected by VPEControl.TargTC. */
void do_mttc0_tcschedule (target_ulong t0)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    *((other_tc == env->current_tc) ?
      &env->active_tc.CP0_TCSchedule : &env->tcs[other_tc].CP0_TCSchedule) = t0;
}
1066

    
1067
/* CP0 TCScheFBack write for the current TC. */
void do_mtc0_tcschefback (target_ulong t0)
{
    env->active_tc.CP0_TCScheFBack = t0;
}

/* MTTC0: write TCScheFBack of the TC selected by VPEControl.TargTC. */
void do_mttc0_tcschefback (target_ulong t0)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    *((other_tc == env->current_tc) ?
      &env->active_tc.CP0_TCScheFBack : &env->tcs[other_tc].CP0_TCScheFBack) = t0;
}
1081

    
1082
/* CP0 EntryLo1 write: keep PFN and the C/D/V/G flag bits (30 bits). */
void do_mtc0_entrylo1 (target_ulong t0)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo1 = t0 & 0x3FFFFFFF;
}
1088

    
1089
/* CP0 Context write: only PTEBase (bits above 22) is writable; the
   BadVPN2 field is maintained by the TLB exception path. */
void do_mtc0_context (target_ulong t0)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (t0 & ~0x007FFFFF);
}
1093

    
1094
/* CP0 PageMask write: restrict to the mask bits representable with the
   configured page size. */
void do_mtc0_pagemask (target_ulong t0)
{
    /* 1k pages not implemented */
    env->CP0_PageMask = t0 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
}

/* CP0 PageGrain write: none of the controlled features are modelled,
   so the register is forced to zero. */
void do_mtc0_pagegrain (target_ulong t0)
{
    /* SmartMIPS not implemented */
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = 0;
}
1107

    
1108
/* CP0 Wired write, clamped into the valid TLB index range. */
void do_mtc0_wired (target_ulong t0)
{
    env->CP0_Wired = t0 % env->tlb->nb_tlb;
}
1112

    
1113
/* SRSConf0-4: sticky-set the bits permitted by the per-register
   read/write bitmask.  */
void do_mtc0_srsconf0 (target_ulong t0)
{
    env->CP0_SRSConf0 |= t0 & env->CP0_SRSConf0_rw_bitmask;
}

void do_mtc0_srsconf1 (target_ulong t0)
{
    env->CP0_SRSConf1 |= t0 & env->CP0_SRSConf1_rw_bitmask;
}

void do_mtc0_srsconf2 (target_ulong t0)
{
    env->CP0_SRSConf2 |= t0 & env->CP0_SRSConf2_rw_bitmask;
}

void do_mtc0_srsconf3 (target_ulong t0)
{
    env->CP0_SRSConf3 |= t0 & env->CP0_SRSConf3_rw_bitmask;
}

void do_mtc0_srsconf4 (target_ulong t0)
{
    env->CP0_SRSConf4 |= t0 & env->CP0_SRSConf4_rw_bitmask;
}

/* HWREna: only the four standard RDHWR enables are writable.  */
void do_mtc0_hwrena (target_ulong t0)
{
    env->CP0_HWREna = t0 & 0x0000000F;
}
1142

    
1143
/* Count: delegate to the timer emulation.  */
void do_mtc0_count (target_ulong t0)
{
    cpu_mips_store_count(env, t0);
}

/* EntryHi: update VPN2/ASID, mirror the ASID into TCStatus on MT
   cores, and flush qemu's TLB when the ASID changes.  */
void do_mtc0_entryhi (target_ulong t0)
{
    target_ulong prev, newval;

    /* 1k pages not implemented */
    newval = t0 & ((TARGET_PAGE_MASK << 1) | 0xFF);
#if defined(TARGET_MIPS64)
    newval &= env->SEGMask;
#endif
    prev = env->CP0_EntryHi;
    env->CP0_EntryHi = newval;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        uint32_t tcst = env->active_tc.CP0_TCStatus & ~0xff;
        env->active_tc.CP0_TCStatus = tcst | (newval & 0xff);
    }
    /* If the ASID changes, flush qemu's TLB.  */
    if ((prev & 0xFF) != (newval & 0xFF))
        cpu_mips_tlb_flush(env, 1);
}

/* EntryHi write targeting another TC: EntryHi itself is VPE-shared,
   but the ASID byte is mirrored into the target TC's TCStatus.  */
void do_mttc0_entryhi(target_ulong t0)
{
    int target_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;

    env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (t0 & ~0xff);
    if (target_tc == env->current_tc) {
        tcstatus = (env->active_tc.CP0_TCStatus & ~0xff) | (t0 & 0xff);
        env->active_tc.CP0_TCStatus = tcstatus;
    } else {
        tcstatus = (env->tcs[target_tc].CP0_TCStatus & ~0xff) | (t0 & 0xff);
        env->tcs[target_tc].CP0_TCStatus = tcstatus;
    }
}

/* Compare: delegate to the timer emulation.  */
void do_mtc0_compare (target_ulong t0)
{
    cpu_mips_store_compare(env, t0);
}
1187

    
1188
/* Status: apply the writable-bit mask, recompute hflags and re-evaluate
   pending interrupts.  */
void do_mtc0_status (target_ulong t0)
{
    uint32_t mask = env->CP0_Status_rw_bitmask;
    uint32_t newval = t0 & mask;
    uint32_t oldval = env->CP0_Status;

    env->CP0_Status = (oldval & ~mask) | newval;
    compute_hflags(env);
    if (loglevel & CPU_LOG_EXEC)
        do_mtc0_status_debug(oldval, newval);
    cpu_mips_update_irq(env);
}

/* Status write targeting another TC: CU/MX/KSU are mirrored into the
   target TC's TCStatus fields.  */
void do_mttc0_status(target_ulong t0)
{
    int target_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus = env->tcs[target_tc].CP0_TCStatus;

    env->CP0_Status = t0 & ~0xf1000018;
    tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (t0 & (0xf << CP0St_CU0));
    tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((t0 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
    tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((t0 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
    if (target_tc == env->current_tc)
        env->active_tc.CP0_TCStatus = tcstatus;
    else
        env->tcs[target_tc].CP0_TCStatus = tcstatus;
}

void do_mtc0_intctl (target_ulong t0)
{
    /* vectored interrupts not implemented, no performance counters. */
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (t0 & 0x000002e0);
}

/* SRSCtl: only ESS and PSS are writable.  */
void do_mtc0_srsctl (target_ulong t0)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);

    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (t0 & mask);
}
1228

    
1229
/* Cause: software interrupt bits and IV are writable; DC additionally
   on R2 cores, where toggling it starts/stops the Count timer.  */
void do_mtc0_cause (target_ulong t0)
{
    uint32_t wmask = 0x00C00300;
    uint32_t old = env->CP0_Cause;

    if (env->insn_flags & ISA_MIPS32R2)
        wmask |= 1 << CP0Ca_DC;

    env->CP0_Cause = (env->CP0_Cause & ~wmask) | (t0 & wmask);

    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (env->CP0_Cause & (1 << CP0Ca_DC))
            cpu_mips_stop_count(env);
        else
            cpu_mips_start_count(env);
    }

    /* Handle the software interrupt as an hardware one, as they
       are very similar */
    if (t0 & CP0Ca_IP_mask)
        cpu_mips_update_irq(env);
}

void do_mtc0_ebase (target_ulong t0)
{
    /* vectored interrupts not implemented */
    /* Multi-CPU not implemented */
    env->CP0_EBase = 0x80000000 | (t0 & 0x3FFFF000);
}
1259

    
1260
void do_mtc0_config0 (target_ulong t0)
1261
{
1262
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (t0 & 0x00000007);
1263
}
1264

    
1265
void do_mtc0_config2 (target_ulong t0)
1266
{
1267
    /* tertiary/secondary caches not implemented */
1268
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1269
}
1270

    
1271
void do_mtc0_watchlo (target_ulong t0, uint32_t sel)
1272
{
1273
    /* Watch exceptions for instructions, data loads, data stores
1274
       not implemented. */
1275
    env->CP0_WatchLo[sel] = (t0 & ~0x7);
1276
}
1277

    
1278
void do_mtc0_watchhi (target_ulong t0, uint32_t sel)
1279
{
1280
    env->CP0_WatchHi[sel] = (t0 & 0x40FF0FF8);
1281
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & t0 & 0x7);
1282
}
1283

    
1284
void do_mtc0_xcontext (target_ulong t0)
1285
{
1286
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1287
    env->CP0_XContext = (env->CP0_XContext & mask) | (t0 & ~mask);
1288
}
1289

    
1290
void do_mtc0_framemask (target_ulong t0)
1291
{
1292
    env->CP0_Framemask = t0; /* XXX */
1293
}
1294

    
1295
/* Debug: update the writable bits and keep the DM hflag in sync.  */
void do_mtc0_debug (target_ulong t0)
{
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (t0 & 0x13300120);
    if (t0 & (1 << CP0DB_DM))
        env->hflags |= MIPS_HFLAG_DM;
    else
        env->hflags &= ~MIPS_HFLAG_DM;
}

/* Debug write targeting another TC: SSt/Halt go to the per-TC debug
   state, everything else to the shared register.  */
void do_mttc0_debug(target_ulong t0)
{
    int target_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = t0 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (target_tc == env->current_tc)
        env->active_tc.CP0_Debug_tcstatus = val;
    else
        env->tcs[target_tc].CP0_Debug_tcstatus = val;
    env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                     (t0 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}

void do_mtc0_performance0 (target_ulong t0)
{
    env->CP0_Performance0 = t0 & 0x000007ff;
}
1322

    
1323
/* Cache tag/data registers; only TagLo masks its input.  */
void do_mtc0_taglo (target_ulong t0)
{
    env->CP0_TagLo = t0 & 0xFFFFFCF6;
}

void do_mtc0_datalo (target_ulong t0)
{
    env->CP0_DataLo = t0; /* XXX */
}

void do_mtc0_taghi (target_ulong t0)
{
    env->CP0_TagHi = t0; /* XXX */
}

void do_mtc0_datahi (target_ulong t0)
{
    env->CP0_DataHi = t0; /* XXX */
}
1342

    
1343
/* Log a Status register transition together with the resulting
   MMU/privilege mode.  */
void do_mtc0_status_debug(uint32_t old, uint32_t val)
{
    uint32_t ksu;

    qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
            old, old & env->CP0_Cause & CP0Ca_IP_mask,
            val, val & env->CP0_Cause & CP0Ca_IP_mask,
            env->CP0_Cause);
    ksu = env->hflags & MIPS_HFLAG_KSU;
    if (ksu == MIPS_HFLAG_UM)
        qemu_log(", UM\n");
    else if (ksu == MIPS_HFLAG_SM)
        qemu_log(", SM\n");
    else if (ksu == MIPS_HFLAG_KM)
        qemu_log("\n");
    else
        cpu_abort(env, "Invalid MMU mode!\n");
}

void do_mtc0_status_irqraise_debug(void)
{
    qemu_log("Raise pending IRQs\n");
}
1361

    
1362
/* MIPS MT functions */
1363
target_ulong do_mftgpr(uint32_t sel)
1364
{
1365
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1366

    
1367
    if (other_tc == env->current_tc)
1368
        return env->active_tc.gpr[sel];
1369
    else
1370
        return env->tcs[other_tc].gpr[sel];
1371
}
1372

    
1373
target_ulong do_mftlo(uint32_t sel)
1374
{
1375
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1376

    
1377
    if (other_tc == env->current_tc)
1378
        return env->active_tc.LO[sel];
1379
    else
1380
        return env->tcs[other_tc].LO[sel];
1381
}
1382

    
1383
target_ulong do_mfthi(uint32_t sel)
1384
{
1385
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1386

    
1387
    if (other_tc == env->current_tc)
1388
        return env->active_tc.HI[sel];
1389
    else
1390
        return env->tcs[other_tc].HI[sel];
1391
}
1392

    
1393
target_ulong do_mftacx(uint32_t sel)
1394
{
1395
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1396

    
1397
    if (other_tc == env->current_tc)
1398
        return env->active_tc.ACX[sel];
1399
    else
1400
        return env->tcs[other_tc].ACX[sel];
1401
}
1402

    
1403
target_ulong do_mftdsp(void)
1404
{
1405
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1406

    
1407
    if (other_tc == env->current_tc)
1408
        return env->active_tc.DSPControl;
1409
    else
1410
        return env->tcs[other_tc].DSPControl;
1411
}
1412

    
1413
/* MTTR helpers: write a register of the TC selected by
   VPEControl.TargTC, using the live copy for the running TC.  */
void do_mttgpr(target_ulong t0, uint32_t sel)
{
    int target_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (target_tc == env->current_tc)
        env->active_tc.gpr[sel] = t0;
    else
        env->tcs[target_tc].gpr[sel] = t0;
}

void do_mttlo(target_ulong t0, uint32_t sel)
{
    int target_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (target_tc == env->current_tc)
        env->active_tc.LO[sel] = t0;
    else
        env->tcs[target_tc].LO[sel] = t0;
}

void do_mtthi(target_ulong t0, uint32_t sel)
{
    int target_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (target_tc == env->current_tc)
        env->active_tc.HI[sel] = t0;
    else
        env->tcs[target_tc].HI[sel] = t0;
}

void do_mttacx(target_ulong t0, uint32_t sel)
{
    int target_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (target_tc == env->current_tc)
        env->active_tc.ACX[sel] = t0;
    else
        env->tcs[target_tc].ACX[sel] = t0;
}

void do_mttdsp(target_ulong t0)
{
    int target_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (target_tc == env->current_tc)
        env->active_tc.DSPControl = t0;
    else
        env->tcs[target_tc].DSPControl = t0;
}
1462

    
1463
/* MIPS MT functions */
1464
/* DMT/EMT/DVPE/EVPE are stubs: multithreading state switching is not
   implemented yet, so each returns 0 as the previous-state value for rt.  */
target_ulong do_dmt(target_ulong t0)
{
    // TODO: disable multi-threading.
    return 0;
}

target_ulong do_emt(target_ulong t0)
{
    // TODO: enable multi-threading.
    return 0;
}

target_ulong do_dvpe(target_ulong t0)
{
    // TODO: disable virtual processors.
    return 0;
}

target_ulong do_evpe(target_ulong t0)
{
    // TODO: enable virtual processors.
    return 0;
}
1499
#endif /* !CONFIG_USER_ONLY */
1500

    
1501
/* FORK is not implemented: the child TC's registers are never written.  */
void do_fork(target_ulong t0, target_ulong t1)
{
    // t0 = rt, t1 = rs
    t0 = 0;
    // TODO: store to TC register
}
1507

    
1508
/* YIELD: t0 is the (logically signed) rs operand.  Negative values are
 * qualifier modes (-1 waits, -2 yields unconditionally), 0 deallocates
 * the TC, positive values are a qualifier bit mask.  Returns YQMask.
 *
 * Bug fix: t0 is target_ulong (unsigned), so the original `t0 < 0'
 * comparison was always false and the negative-operand path (including
 * the YSI thread exception) could never be taken.  Compare through
 * target_long instead.
 */
target_ulong do_yield(target_ulong t0)
{
    if ((target_long)t0 < 0) {
        /* No scheduling policy implemented. */
        if (t0 != (target_ulong)-2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                do_raise_exception(EXCP_THREAD);
            }
        }
    } else if (t0 == 0) {
        if (0 /* TODO: TC underflow */) {
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            do_raise_exception(EXCP_THREAD);
        } else {
            // TODO: Deallocate TC
        }
    } else {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        do_raise_exception(EXCP_THREAD);
    }
    return env->CP0_YQMask;
}
1535

    
1536
#ifndef CONFIG_USER_ONLY
1537
/* TLB management */
1538
/* TLB management */

/* Drop qemu's cached translations and forget all shadow entries.  */
void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    tlb_flush (env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

/* Invalidate shadow entries env->tlb[first] .. tlb_in_use-1.  */
static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
{
    while (env->tlb->tlb_in_use > first)
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
}
1552

    
1553
static void r4k_fill_tlb (int idx)
1554
{
1555
    r4k_tlb_t *tlb;
1556

    
1557
    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1558
    tlb = &env->tlb->mmu.r4k.tlb[idx];
1559
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
1560
#if defined(TARGET_MIPS64)
1561
    tlb->VPN &= env->SEGMask;
1562
#endif
1563
    tlb->ASID = env->CP0_EntryHi & 0xFF;
1564
    tlb->PageMask = env->CP0_PageMask;
1565
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
1566
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1567
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1568
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1569
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1570
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1571
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1572
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1573
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1574
}
1575

    
1576
void r4k_do_tlbwi (void)
1577
{
1578
    int idx;
1579

    
1580
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
1581

    
1582
    /* Discard cached TLB entries.  We could avoid doing this if the
1583
       tlbwi is just upgrading access permissions on the current entry;
1584
       that might be a further win.  */
1585
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
1586

    
1587
    r4k_invalidate_tlb(env, idx, 0);
1588
    r4k_fill_tlb(idx);
1589
}
1590

    
1591
void r4k_do_tlbwr (void)
1592
{
1593
    int r = cpu_mips_get_random(env);
1594

    
1595
    r4k_invalidate_tlb(env, r, 1);
1596
    r4k_fill_tlb(r);
1597
}
1598

    
1599
void r4k_do_tlbp (void)
1600
{
1601
    r4k_tlb_t *tlb;
1602
    target_ulong mask;
1603
    target_ulong tag;
1604
    target_ulong VPN;
1605
    uint8_t ASID;
1606
    int i;
1607

    
1608
    ASID = env->CP0_EntryHi & 0xFF;
1609
    for (i = 0; i < env->tlb->nb_tlb; i++) {
1610
        tlb = &env->tlb->mmu.r4k.tlb[i];
1611
        /* 1k pages are not supported. */
1612
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1613
        tag = env->CP0_EntryHi & ~mask;
1614
        VPN = tlb->VPN & ~mask;
1615
        /* Check ASID, virtual page number & size */
1616
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1617
            /* TLB match */
1618
            env->CP0_Index = i;
1619
            break;
1620
        }
1621
    }
1622
    if (i == env->tlb->nb_tlb) {
1623
        /* No match.  Discard any shadow entries, if any of them match.  */
1624
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
1625
            tlb = &env->tlb->mmu.r4k.tlb[i];
1626
            /* 1k pages are not supported. */
1627
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1628
            tag = env->CP0_EntryHi & ~mask;
1629
            VPN = tlb->VPN & ~mask;
1630
            /* Check ASID, virtual page number & size */
1631
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1632
                r4k_mips_tlb_flush_extra (env, i);
1633
                break;
1634
            }
1635
        }
1636

    
1637
        env->CP0_Index |= 0x80000000;
1638
    }
1639
}
1640

    
1641
void r4k_do_tlbr (void)
1642
{
1643
    r4k_tlb_t *tlb;
1644
    uint8_t ASID;
1645
    int idx;
1646

    
1647
    ASID = env->CP0_EntryHi & 0xFF;
1648
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
1649
    tlb = &env->tlb->mmu.r4k.tlb[idx];
1650

    
1651
    /* If this will change the current ASID, flush qemu's TLB.  */
1652
    if (ASID != tlb->ASID)
1653
        cpu_mips_tlb_flush (env, 1);
1654

    
1655
    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
1656

    
1657
    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
1658
    env->CP0_PageMask = tlb->PageMask;
1659
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
1660
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
1661
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
1662
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
1663
}
1664

    
1665
void do_tlbwi(void)
1666
{
1667
    env->tlb->do_tlbwi();
1668
}
1669

    
1670
void do_tlbwr(void)
1671
{
1672
    env->tlb->do_tlbwr();
1673
}
1674

    
1675
void do_tlbp(void)
1676
{
1677
    env->tlb->do_tlbp();
1678
}
1679

    
1680
void do_tlbr(void)
1681
{
1682
    env->tlb->do_tlbr();
1683
}
1684

    
1685
/* Specials */
1686
target_ulong do_di (void)
1687
{
1688
    target_ulong t0 = env->CP0_Status;
1689

    
1690
    env->CP0_Status = t0 & ~(1 << CP0St_IE);
1691
    cpu_mips_update_irq(env);
1692

    
1693
    return t0;
1694
}
1695

    
1696
target_ulong do_ei (void)
1697
{
1698
    target_ulong t0 = env->CP0_Status;
1699

    
1700
    env->CP0_Status = t0 | (1 << CP0St_IE);
1701
    cpu_mips_update_irq(env);
1702

    
1703
    return t0;
1704
}
1705

    
1706
static void debug_pre_eret (void)
1707
{
1708
    if (loglevel & CPU_LOG_EXEC) {
1709
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1710
                env->active_tc.PC, env->CP0_EPC);
1711
        if (env->CP0_Status & (1 << CP0St_ERL))
1712
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1713
        if (env->hflags & MIPS_HFLAG_DM)
1714
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1715
        qemu_log("\n");
1716
    }
1717
}
1718

    
1719
static void debug_post_eret (void)
1720
{
1721
    if (loglevel & CPU_LOG_EXEC) {
1722
        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1723
                env->active_tc.PC, env->CP0_EPC);
1724
        if (env->CP0_Status & (1 << CP0St_ERL))
1725
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1726
        if (env->hflags & MIPS_HFLAG_DM)
1727
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1728
        switch (env->hflags & MIPS_HFLAG_KSU) {
1729
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1730
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1731
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
1732
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1733
        }
1734
    }
1735
}
1736

    
1737
void do_eret (void)
1738
{
1739
    debug_pre_eret();
1740
    if (env->CP0_Status & (1 << CP0St_ERL)) {
1741
        env->active_tc.PC = env->CP0_ErrorEPC;
1742
        env->CP0_Status &= ~(1 << CP0St_ERL);
1743
    } else {
1744
        env->active_tc.PC = env->CP0_EPC;
1745
        env->CP0_Status &= ~(1 << CP0St_EXL);
1746
    }
1747
    compute_hflags(env);
1748
    debug_post_eret();
1749
    env->CP0_LLAddr = 1;
1750
}
1751

    
1752
void do_deret (void)
1753
{
1754
    debug_pre_eret();
1755
    env->active_tc.PC = env->CP0_DEPC;
1756
    env->hflags &= MIPS_HFLAG_DM;
1757
    compute_hflags(env);
1758
    debug_post_eret();
1759
    env->CP0_LLAddr = 1;
1760
}
1761
#endif /* !CONFIG_USER_ONLY */
1762

    
1763
target_ulong do_rdhwr_cpunum(void)
1764
{
1765
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1766
        (env->CP0_HWREna & (1 << 0)))
1767
        return env->CP0_EBase & 0x3ff;
1768
    else
1769
        do_raise_exception(EXCP_RI);
1770

    
1771
    return 0;
1772
}
1773

    
1774
target_ulong do_rdhwr_synci_step(void)
1775
{
1776
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1777
        (env->CP0_HWREna & (1 << 1)))
1778
        return env->SYNCI_Step;
1779
    else
1780
        do_raise_exception(EXCP_RI);
1781

    
1782
    return 0;
1783
}
1784

    
1785
target_ulong do_rdhwr_cc(void)
1786
{
1787
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1788
        (env->CP0_HWREna & (1 << 2)))
1789
        return env->CP0_Count;
1790
    else
1791
        do_raise_exception(EXCP_RI);
1792

    
1793
    return 0;
1794
}
1795

    
1796
target_ulong do_rdhwr_ccres(void)
1797
{
1798
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1799
        (env->CP0_HWREna & (1 << 3)))
1800
        return env->CCRes;
1801
    else
1802
        do_raise_exception(EXCP_RI);
1803

    
1804
    return 0;
1805
}
1806

    
1807
void do_pmon (int function)
1808
{
1809
    function /= 2;
1810
    switch (function) {
1811
    case 2: /* TODO: char inbyte(int waitflag); */
1812
        if (env->active_tc.gpr[4] == 0)
1813
            env->active_tc.gpr[2] = -1;
1814
        /* Fall through */
1815
    case 11: /* TODO: char inbyte (void); */
1816
        env->active_tc.gpr[2] = -1;
1817
        break;
1818
    case 3:
1819
    case 12:
1820
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
1821
        break;
1822
    case 17:
1823
        break;
1824
    case 158:
1825
        {
1826
            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
1827
            printf("%s", fmt);
1828
        }
1829
        break;
1830
    }
1831
}
1832

    
1833
void do_wait (void)
1834
{
1835
    env->halted = 1;
1836
    do_raise_exception(EXCP_HLT);
1837
}
1838

    
1839
#if !defined(CONFIG_USER_ONLY)
1840

    
1841
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
1842

    
1843
#define MMUSUFFIX _mmu
1844
#define ALIGNED_ONLY
1845

    
1846
#define SHIFT 0
1847
#include "softmmu_template.h"
1848

    
1849
#define SHIFT 1
1850
#include "softmmu_template.h"
1851

    
1852
#define SHIFT 2
1853
#include "softmmu_template.h"
1854

    
1855
#define SHIFT 3
1856
#include "softmmu_template.h"
1857

    
1858
/* Record the faulting address and raise an address-error exception:
   AdES for stores, AdEL for loads.  */
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (retaddr);
    if (is_write == 1)
        do_raise_exception (EXCP_AdES);
    else
        do_raise_exception (EXCP_AdEL);
}
1864

    
1865
/* Handle a softmmu TLB miss: walk the guest MMU and, on failure, raise
   the recorded exception after restoring the translation state.  */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    CPUState *prev_env;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    prev_env = env;
    env = cpu_single_env;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            unsigned long pc = (unsigned long)retaddr;
            TranslationBlock *tb = tb_find_pc(pc);

            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        do_raise_exception_err(env->exception_index, env->error_code);
    }
    env = prev_env;
}
1892

    
1893
/* Bus error on an unassigned physical address: IBE for instruction
   fetches, DBE for data accesses.  */
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int unused, int size)
{
    do_raise_exception(is_exec ? EXCP_IBE : EXCP_DBE);
}
1901
#endif /* !CONFIG_USER_ONLY */
1902

    
1903
/* Complex FPU operations which may need stack space. */
1904

    
1905
#define FLOAT_ONE32 make_float32(0x3f8 << 20)
1906
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
1907
#define FLOAT_TWO32 make_float32(1 << 30)
1908
#define FLOAT_TWO64 make_float64(1ULL << 62)
1909
#define FLOAT_QNAN32 0x7fbfffff
1910
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
1911
#define FLOAT_SNAN32 0x7fffffff
1912
#define FLOAT_SNAN64 0x7fffffffffffffffULL
1913

    
1914
/* convert MIPS rounding mode in FCR31 to IEEE library */
1915
unsigned int ieee_rm[] = {
1916
    float_round_nearest_even,
1917
    float_round_to_zero,
1918
    float_round_up,
1919
    float_round_down
1920
};
1921

    
1922
#define RESTORE_ROUNDING_MODE \
1923
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
1924

    
1925
/* CFC1: read an FPU control register (FIR, or the FCSR views 25/26/28,
   defaulting to the full FCSR).  */
target_ulong do_cfc1 (uint32_t reg)
{
    target_ulong ret;

    switch (reg) {
    case 0:
        ret = (int32_t)env->active_fpu.fcr0;
        break;
    case 25:
        /* FCCR: condition codes gathered into the low bits.  */
        ret = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
        break;
    case 26:
        /* FEXR: cause and flag fields.  */
        ret = env->active_fpu.fcr31 & 0x0003f07c;
        break;
    case 28:
        /* FENR: enables, rounding mode and FS.  */
        ret = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
        break;
    default:
        ret = (int32_t)env->active_fpu.fcr31;
        break;
    }

    return ret;
}
1949

    
1950
/* CTC1: write an FPU control register view, refresh the softfloat
   rounding mode, and raise FPE if an enabled cause bit is now set.
   Writes with reserved bits set are silently ignored.  */
void do_ctc1 (target_ulong t0, uint32_t reg)
{
    switch(reg) {
    case 25:
        /* FCCR: scatter condition codes back into FCSR.  */
        if (t0 & 0xffffff00)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((t0 & 0xfe) << 24) |
                     ((t0 & 0x1) << 23);
        break;
    case 26:
        /* FEXR: cause and flag fields.  */
        if (t0 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (t0 & 0x0003f07c);
        break;
    case 28:
        /* FENR: enables, rounding mode and FS.  */
        if (t0 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (t0 & 0x00000f83) |
                     ((t0 & 0x4) << 22);
        break;
    case 31:
        if (t0 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = t0;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
        do_raise_exception(EXCP_FPE);
}
1984

    
1985
static inline char ieee_ex_to_mips(char xcpt)
1986
{
1987
    return (xcpt & float_flag_inexact) >> 5 |
1988
           (xcpt & float_flag_underflow) >> 3 |
1989
           (xcpt & float_flag_overflow) >> 1 |
1990
           (xcpt & float_flag_divbyzero) << 1 |
1991
           (xcpt & float_flag_invalid) << 4;
1992
}
1993

    
1994
static inline char mips_ex_to_ieee(char xcpt)
1995
{
1996
    return (xcpt & FP_INEXACT) << 5 |
1997
           (xcpt & FP_UNDERFLOW) << 3 |
1998
           (xcpt & FP_OVERFLOW) << 1 |
1999
           (xcpt & FP_DIV0) >> 1 |
2000
           (xcpt & FP_INVALID) >> 4;
2001
}
2002

    
2003
/* Fold the pending softfloat exception flags into fcr31: write them to
 * the CAUSE field, then either raise an FP exception (if the exception
 * is enabled) or accumulate them into the sticky FLAGS field.  The
 * raise-vs-accumulate order is architectural: an enabled exception
 * traps instead of setting flags. */
static inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));

    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
        do_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
}
2013

    
2014
/* Float support.
   Single precision routines have a "s" suffix, double precision a
   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
   paired single lower "pl", paired single upper "pu".  */
2018

    
2019
/* unary operations, modifying fp status  */
2020
uint64_t do_float_sqrt_d(uint64_t fdt0)
2021
{
2022
    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
2023
}
2024

    
2025
uint32_t do_float_sqrt_s(uint32_t fst0)
2026
{
2027
    return float32_sqrt(fst0, &env->active_fpu.fp_status);
2028
}
2029

    
2030
/* CVT.D.S: widen a single-precision value to double precision. */
uint64_t do_float_cvtd_s(uint32_t fst0)
{
    uint64_t ret;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    ret = float32_to_float64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return ret;
}

/* CVT.D.W: convert a signed 32-bit integer to double precision. */
uint64_t do_float_cvtd_w(uint32_t wt0)
{
    uint64_t ret;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    ret = int32_to_float64(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return ret;
}

/* CVT.D.L: convert a signed 64-bit integer to double precision. */
uint64_t do_float_cvtd_l(uint64_t dt0)
{
    uint64_t ret;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    ret = int64_to_float64(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return ret;
}
2059

    
2060
uint64_t do_float_cvtl_d(uint64_t fdt0)
2061
{
2062
    uint64_t dt2;
2063

    
2064
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2065
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2066
    update_fcr31();
2067
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2068
        dt2 = FLOAT_SNAN64;
2069
    return dt2;
2070
}
2071

    
2072
uint64_t do_float_cvtl_s(uint32_t fst0)
2073
{
2074
    uint64_t dt2;
2075

    
2076
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2077
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2078
    update_fcr31();
2079
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2080
        dt2 = FLOAT_SNAN64;
2081
    return dt2;
2082
}
2083

    
2084
/* CVT.PS.PW: convert a pair of 32-bit integers (low/high halves of dt0)
   to a paired-single value. */
uint64_t do_float_cvtps_pw(uint64_t dt0)
{
    uint32_t lo, hi;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    lo = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    hi = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)hi << 32) | lo;
}

/* CVT.PW.PS: convert a paired-single to a pair of 32-bit integers;
   overflow/invalid replaces both halves with FLOAT_SNAN32. */
uint64_t do_float_cvtpw_ps(uint64_t fdt0)
{
    uint32_t lo, hi;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    lo = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    hi = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        lo = FLOAT_SNAN32;
        hi = FLOAT_SNAN32;
    }
    return ((uint64_t)hi << 32) | lo;
}
2111

    
2112
/* CVT.S.D: narrow a double-precision value to single precision. */
uint32_t do_float_cvts_d(uint64_t fdt0)
{
    uint32_t ret;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    ret = float64_to_float32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return ret;
}

/* CVT.S.W: convert a signed 32-bit integer to single precision. */
uint32_t do_float_cvts_w(uint32_t wt0)
{
    uint32_t ret;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    ret = int32_to_float32(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return ret;
}

/* CVT.S.L: convert a signed 64-bit integer to single precision. */
uint32_t do_float_cvts_l(uint64_t dt0)
{
    uint32_t ret;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    ret = int64_to_float32(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return ret;
}

/* CVT.S.PL: take the lower single of a paired-single.  A plain move,
   but it still clears and folds the exception flags like the real
   conversions. */
uint32_t do_float_cvts_pl(uint32_t wt0)
{
    uint32_t ret;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    ret = wt0;
    update_fcr31();
    return ret;
}

/* CVT.S.PU: take the upper single of a paired-single (see cvts_pl). */
uint32_t do_float_cvts_pu(uint32_t wth0)
{
    uint32_t ret;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    ret = wth0;
    update_fcr31();
    return ret;
}
2161

    
2162
uint32_t do_float_cvtw_s(uint32_t fst0)
2163
{
2164
    uint32_t wt2;
2165

    
2166
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2167
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2168
    update_fcr31();
2169
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2170
        wt2 = FLOAT_SNAN32;
2171
    return wt2;
2172
}
2173

    
2174
uint32_t do_float_cvtw_d(uint64_t fdt0)
2175
{
2176
    uint32_t wt2;
2177

    
2178
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2179
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2180
    update_fcr31();
2181
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2182
        wt2 = FLOAT_SNAN32;
2183
    return wt2;
2184
}
2185

    
2186
uint64_t do_float_roundl_d(uint64_t fdt0)
2187
{
2188
    uint64_t dt2;
2189

    
2190
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2191
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2192
    RESTORE_ROUNDING_MODE;
2193
    update_fcr31();
2194
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2195
        dt2 = FLOAT_SNAN64;
2196
    return dt2;
2197
}
2198

    
2199
uint64_t do_float_roundl_s(uint32_t fst0)
2200
{
2201
    uint64_t dt2;
2202

    
2203
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2204
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2205
    RESTORE_ROUNDING_MODE;
2206
    update_fcr31();
2207
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2208
        dt2 = FLOAT_SNAN64;
2209
    return dt2;
2210
}
2211

    
2212
uint32_t do_float_roundw_d(uint64_t fdt0)
2213
{
2214
    uint32_t wt2;
2215

    
2216
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2217
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2218
    RESTORE_ROUNDING_MODE;
2219
    update_fcr31();
2220
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2221
        wt2 = FLOAT_SNAN32;
2222
    return wt2;
2223
}
2224

    
2225
uint32_t do_float_roundw_s(uint32_t fst0)
2226
{
2227
    uint32_t wt2;
2228

    
2229
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2230
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2231
    RESTORE_ROUNDING_MODE;
2232
    update_fcr31();
2233
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2234
        wt2 = FLOAT_SNAN32;
2235
    return wt2;
2236
}
2237

    
2238
uint64_t do_float_truncl_d(uint64_t fdt0)
2239
{
2240
    uint64_t dt2;
2241

    
2242
    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
2243
    update_fcr31();
2244
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2245
        dt2 = FLOAT_SNAN64;
2246
    return dt2;
2247
}
2248

    
2249
uint64_t do_float_truncl_s(uint32_t fst0)
2250
{
2251
    uint64_t dt2;
2252

    
2253
    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
2254
    update_fcr31();
2255
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2256
        dt2 = FLOAT_SNAN64;
2257
    return dt2;
2258
}
2259

    
2260
uint32_t do_float_truncw_d(uint64_t fdt0)
2261
{
2262
    uint32_t wt2;
2263

    
2264
    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
2265
    update_fcr31();
2266
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2267
        wt2 = FLOAT_SNAN32;
2268
    return wt2;
2269
}
2270

    
2271
uint32_t do_float_truncw_s(uint32_t fst0)
2272
{
2273
    uint32_t wt2;
2274

    
2275
    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
2276
    update_fcr31();
2277
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2278
        wt2 = FLOAT_SNAN32;
2279
    return wt2;
2280
}
2281

    
2282
uint64_t do_float_ceill_d(uint64_t fdt0)
2283
{
2284
    uint64_t dt2;
2285

    
2286
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2287
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2288
    RESTORE_ROUNDING_MODE;
2289
    update_fcr31();
2290
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2291
        dt2 = FLOAT_SNAN64;
2292
    return dt2;
2293
}
2294

    
2295
uint64_t do_float_ceill_s(uint32_t fst0)
2296
{
2297
    uint64_t dt2;
2298

    
2299
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2300
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2301
    RESTORE_ROUNDING_MODE;
2302
    update_fcr31();
2303
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2304
        dt2 = FLOAT_SNAN64;
2305
    return dt2;
2306
}
2307

    
2308
uint32_t do_float_ceilw_d(uint64_t fdt0)
2309
{
2310
    uint32_t wt2;
2311

    
2312
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2313
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2314
    RESTORE_ROUNDING_MODE;
2315
    update_fcr31();
2316
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2317
        wt2 = FLOAT_SNAN32;
2318
    return wt2;
2319
}
2320

    
2321
uint32_t do_float_ceilw_s(uint32_t fst0)
2322
{
2323
    uint32_t wt2;
2324

    
2325
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2326
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2327
    RESTORE_ROUNDING_MODE;
2328
    update_fcr31();
2329
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2330
        wt2 = FLOAT_SNAN32;
2331
    return wt2;
2332
}
2333

    
2334
uint64_t do_float_floorl_d(uint64_t fdt0)
2335
{
2336
    uint64_t dt2;
2337

    
2338
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2339
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2340
    RESTORE_ROUNDING_MODE;
2341
    update_fcr31();
2342
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2343
        dt2 = FLOAT_SNAN64;
2344
    return dt2;
2345
}
2346

    
2347
uint64_t do_float_floorl_s(uint32_t fst0)
2348
{
2349
    uint64_t dt2;
2350

    
2351
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2352
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2353
    RESTORE_ROUNDING_MODE;
2354
    update_fcr31();
2355
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2356
        dt2 = FLOAT_SNAN64;
2357
    return dt2;
2358
}
2359

    
2360
uint32_t do_float_floorw_d(uint64_t fdt0)
2361
{
2362
    uint32_t wt2;
2363

    
2364
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2365
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2366
    RESTORE_ROUNDING_MODE;
2367
    update_fcr31();
2368
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2369
        wt2 = FLOAT_SNAN32;
2370
    return wt2;
2371
}
2372

    
2373
uint32_t do_float_floorw_s(uint32_t fst0)
2374
{
2375
    uint32_t wt2;
2376

    
2377
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2378
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2379
    RESTORE_ROUNDING_MODE;
2380
    update_fcr31();
2381
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2382
        wt2 = FLOAT_SNAN32;
2383
    return wt2;
2384
}
2385

    
2386
/* unary operations, not modifying fp status  */
2387
/* Sign-bit unary ops (abs, chs/negate) for d, s and ps formats.
 * These are pure bit manipulations on the sign bit, so they never
 * touch fp_status and need no exception-flag handling. */
#define FLOAT_UNOP(name)                                       \
uint64_t do_float_ ## name ## _d(uint64_t fdt0)                \
{                                                              \
    return float64_ ## name(fdt0);                             \
}                                                              \
uint32_t do_float_ ## name ## _s(uint32_t fst0)                \
{                                                              \
    return float32_ ## name(fst0);                             \
}                                                              \
uint64_t do_float_ ## name ## _ps(uint64_t fdt0)               \
{                                                              \
    uint32_t wt0;                                              \
    uint32_t wth0;                                             \
                                                               \
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                 \
    wth0 = float32_ ## name(fdt0 >> 32);                       \
    return ((uint64_t)wth0 << 32) | wt0;                       \
}
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP
2408

    
2409
/* MIPS specific unary operations */
2410
uint64_t do_float_recip_d(uint64_t fdt0)
2411
{
2412
    uint64_t fdt2;
2413

    
2414
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2415
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2416
    update_fcr31();
2417
    return fdt2;
2418
}
2419

    
2420
uint32_t do_float_recip_s(uint32_t fst0)
2421
{
2422
    uint32_t fst2;
2423

    
2424
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2425
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2426
    update_fcr31();
2427
    return fst2;
2428
}
2429

    
2430
uint64_t do_float_rsqrt_d(uint64_t fdt0)
2431
{
2432
    uint64_t fdt2;
2433

    
2434
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2435
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2436
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2437
    update_fcr31();
2438
    return fdt2;
2439
}
2440

    
2441
uint32_t do_float_rsqrt_s(uint32_t fst0)
2442
{
2443
    uint32_t fst2;
2444

    
2445
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2446
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2447
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2448
    update_fcr31();
2449
    return fst2;
2450
}
2451

    
2452
uint64_t do_float_recip1_d(uint64_t fdt0)
2453
{
2454
    uint64_t fdt2;
2455

    
2456
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2457
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2458
    update_fcr31();
2459
    return fdt2;
2460
}
2461

    
2462
uint32_t do_float_recip1_s(uint32_t fst0)
2463
{
2464
    uint32_t fst2;
2465

    
2466
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2467
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2468
    update_fcr31();
2469
    return fst2;
2470
}
2471

    
2472
uint64_t do_float_recip1_ps(uint64_t fdt0)
2473
{
2474
    uint32_t fst2;
2475
    uint32_t fsth2;
2476

    
2477
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2478
    fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2479
    fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
2480
    update_fcr31();
2481
    return ((uint64_t)fsth2 << 32) | fst2;
2482
}
2483

    
2484
uint64_t do_float_rsqrt1_d(uint64_t fdt0)
2485
{
2486
    uint64_t fdt2;
2487

    
2488
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2489
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2490
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2491
    update_fcr31();
2492
    return fdt2;
2493
}
2494

    
2495
uint32_t do_float_rsqrt1_s(uint32_t fst0)
2496
{
2497
    uint32_t fst2;
2498

    
2499
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2500
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2501
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2502
    update_fcr31();
2503
    return fst2;
2504
}
2505

    
2506
uint64_t do_float_rsqrt1_ps(uint64_t fdt0)
2507
{
2508
    uint32_t fst2;
2509
    uint32_t fsth2;
2510

    
2511
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2512
    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2513
    fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
2514
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2515
    fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
2516
    update_fcr31();
2517
    return ((uint64_t)fsth2 << 32) | fst2;
2518
}
2519

    
2520
/* Expands to the prototype of a float-op helper for format 'p'. */
#define FLOAT_OP(name, p) void do_float_##name##_##p(void)
2521

    
2522
/* binary operations */
2523
/* Binary arithmetic ops (add/sub/mul/div) for d, s and ps formats.
 * Each helper clears the softfloat flags, performs the operation,
 * folds the flags into fcr31, and substitutes the QNAN pattern when
 * the operation raised Invalid Operation. */
#define FLOAT_BINOP(name)                                          \
uint64_t do_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)     \
{                                                                  \
    uint64_t dt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
        dt2 = FLOAT_QNAN64;                                        \
    return dt2;                                                    \
}                                                                  \
                                                                   \
uint32_t do_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)     \
{                                                                  \
    uint32_t wt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
        wt2 = FLOAT_QNAN32;                                        \
    return wt2;                                                    \
}                                                                  \
                                                                   \
uint64_t do_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)    \
{                                                                  \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
    uint32_t fsth0 = fdt0 >> 32;                                   \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
    uint32_t fsth1 = fdt1 >> 32;                                   \
    uint32_t wt2;                                                  \
    uint32_t wth2;                                                 \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);  \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {              \
        wt2 = FLOAT_QNAN32;                                        \
        wth2 = FLOAT_QNAN32;                                       \
    }                                                              \
    return ((uint64_t)wth2 << 32) | wt2;                           \
}

FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP
2573

    
2574
/* ternary operations */
2575
/* Fused-style ternary ops: name2(name1(op0, op1), op2), e.g.
 * muladd = (op0 * op1) + op2, for d, s and ps formats.
 * NOTE(review): unlike the binary ops these neither clear the softfloat
 * flags nor call update_fcr31(), so they do not report exceptions into
 * fcr31 — presumably intentional here, but worth confirming against the
 * MADD/MSUB architectural requirements. */
#define FLOAT_TERNOP(name1, name2)                                        \
uint64_t do_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
}                                                                         \
                                                                          \
uint32_t do_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
}                                                                         \
                                                                          \
uint64_t do_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
                                            uint64_t fdt2)                \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}

FLOAT_TERNOP(mul, add)
FLOAT_TERNOP(mul, sub)
#undef FLOAT_TERNOP
2610

    
2611
/* negated ternary operations */
2612
/* Negated ternary ops: -(name2(name1(op0, op1), op2)), e.g.
 * nmuladd = -((op0 * op1) + op2), for d, s and ps formats.
 * Same flag-handling caveat as FLOAT_TERNOP: no update_fcr31(). */
#define FLOAT_NTERNOP(name1, name2)                                       \
uint64_t do_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
    return float64_chs(fdt2);                                             \
}                                                                         \
                                                                          \
uint32_t do_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    return float32_chs(fst2);                                             \
}                                                                         \
                                                                          \
uint64_t do_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
                                           uint64_t fdt2)                 \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
    fst2 = float32_chs(fst2);                                             \
    fsth2 = float32_chs(fsth2);                                           \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}

FLOAT_NTERNOP(mul, add)
FLOAT_NTERNOP(mul, sub)
#undef FLOAT_NTERNOP
2651

    
2652
/* MIPS specific binary operations */
2653
uint64_t do_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
2654
{
2655
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2656
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2657
    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
2658
    update_fcr31();
2659
    return fdt2;
2660
}
2661

    
2662
uint32_t do_float_recip2_s(uint32_t fst0, uint32_t fst2)
2663
{
2664
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2665
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2666
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2667
    update_fcr31();
2668
    return fst2;
2669
}
2670

    
2671
uint64_t do_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
2672
{
2673
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2674
    uint32_t fsth0 = fdt0 >> 32;
2675
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
2676
    uint32_t fsth2 = fdt2 >> 32;
2677

    
2678
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2679
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2680
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
2681
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2682
    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
2683
    update_fcr31();
2684
    return ((uint64_t)fsth2 << 32) | fst2;
2685
}
2686

    
2687
uint64_t do_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
2688
{
2689
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2690
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2691
    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
2692
    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
2693
    update_fcr31();
2694
    return fdt2;
2695
}
2696

    
2697
uint32_t do_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
2698
{
2699
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2700
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2701
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
2702
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
2703
    update_fcr31();
2704
    return fst2;
2705
}
2706

    
2707
uint64_t do_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
2708
{
2709
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2710
    uint32_t fsth0 = fdt0 >> 32;
2711
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
2712
    uint32_t fsth2 = fdt2 >> 32;
2713

    
2714
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2715
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2716
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
2717
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
2718
    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
2719
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
2720
    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
2721
    update_fcr31();
2722
    return ((uint64_t)fsth2 << 32) | fst2;
2723
}
2724

    
2725
/* ADDR.PS: reduction add — result low half = low(fdt0) + high(fdt0),
   result high half = low(fdt1) + high(fdt1). */
uint64_t do_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
{
    uint32_t lo0 = fdt0 & 0XFFFFFFFF;
    uint32_t hi0 = fdt0 >> 32;
    uint32_t lo1 = fdt1 & 0XFFFFFFFF;
    uint32_t hi1 = fdt1 >> 32;
    uint32_t res_lo, res_hi;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    res_lo = float32_add(lo0, hi0, &env->active_fpu.fp_status);
    res_hi = float32_add(lo1, hi1, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)res_hi << 32) | res_lo;
}

/* MULR.PS: reduction multiply — result low half = low(fdt0) * high(fdt0),
   result high half = low(fdt1) * high(fdt1). */
uint64_t do_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
{
    uint32_t lo0 = fdt0 & 0XFFFFFFFF;
    uint32_t hi0 = fdt0 >> 32;
    uint32_t lo1 = fdt1 & 0XFFFFFFFF;
    uint32_t hi1 = fdt1 >> 32;
    uint32_t res_lo, res_hi;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    res_lo = float32_mul(lo0, hi0, &env->active_fpu.fp_status);
    res_hi = float32_mul(lo1, hi1, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)res_hi << 32) | res_lo;
}
2756

    
2757
/* compare operations */
2758
/* Double-precision compares: evaluate 'cond', fold any raised flags
 * into fcr31, and set or clear condition code 'cc'.  The cmpabs
 * variants compare the absolute values (CABS.cond.D).  'cond' may
 * raise Invalid through float64_is_unordered(), which is why
 * update_fcr31() runs after it is evaluated. */
#define FOP_COND_D(op, cond)                                   \
void do_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                              \
    int c = cond;                                              \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void do_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                              \
    int c;                                                     \
    fdt0 = float64_abs(fdt0);                                  \
    fdt1 = float64_abs(fdt1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}
2780

    
2781
/* Return 1 if a and b compare unordered (at least one is a NaN).
 * Signaling NaNs always raise the invalid exception; when "sig" is
 * non-zero, quiet NaNs raise it as well (signaling-compare semantics). */
static int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
{
    int has_snan = float64_is_signaling_nan(a) || float64_is_signaling_nan(b);
    int has_nan = float64_is_nan(a) || float64_is_nan(b);

    if (has_snan || (sig && has_nan)) {
        float_raise(float_flag_invalid, status);
        return 1;
    }
    return has_nan ? 1 : 0;
}
2794

    
2795
/* NOTE: for the always-false conditions (f, sf) the comma operator makes
 * "cond" evaluate to 0, but float64_is_unordered() is still called for
 * its invalid-exception side effect. */
FOP_COND_D(f,   (float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(un,  float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(eq,  !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ueq, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(olt, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ult, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ole, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ule, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
/* The "signaling" variants below pass sig=1: quiet NaN operands also
 * raise the invalid exception (see float64_is_unordered).  The comma
 * operator keeps "cond" false for sf while preserving the side effect. */
FOP_COND_D(sf,  (float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(ngle,float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(seq, !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngl, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(lt,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(nge, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(le,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngt, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2815

    
2816
/* Generate the single-precision compare helpers for one condition:
 *   do_cmp_s_<op>    - C.cond.S, compares fst0 with fst1 as given;
 *   do_cmpabs_s_<op> - compares |fst0| with |fst1| (sign bits cleared).
 * "cond" is an expression over fst0/fst1 evaluated in the helper's
 * scope; its truth value sets or clears FP condition code "cc".
 * update_fcr31() runs after "cond" so any flags raised while comparing
 * (e.g. invalid on signaling NaN) are reflected in FCR31. */
#define FOP_COND_S(op, cond)                                   \
void do_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
{                                                              \
    int c = cond;                                              \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void do_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
{                                                              \
    int c;                                                     \
    fst0 = float32_abs(fst0);                                  \
    fst1 = float32_abs(fst1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}
2838

    
2839
/* Return 1 if a and b compare unordered (at least one is a NaN).
 * Signaling NaNs always raise the invalid exception; when "sig" is
 * non-zero, quiet NaNs raise it as well (signaling-compare semantics). */
static flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
{
    int has_snan = float32_is_signaling_nan(a) || float32_is_signaling_nan(b);
    int has_nan = float32_is_nan(a) || float32_is_nan(b);

    if (has_snan || (sig && has_nan)) {
        float_raise(float_flag_invalid, status);
        return 1;
    }
    return has_nan ? 1 : 0;
}
2852

    
2853
/* NOTE: for the always-false conditions (f, sf) the comma operator makes
 * "cond" evaluate to 0, but float32_is_unordered() is still called for
 * its invalid-exception side effect. */
FOP_COND_S(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
/* The "signaling" variants below pass sig=1: quiet NaN operands also
 * raise the invalid exception (see float32_is_unordered).  The comma
 * operator keeps "cond" false for sf while preserving the side effect. */
FOP_COND_S(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
2873

    
2874
#define FOP_COND_PS(op, condl, condh)                           \
2875
void do_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
2876
{                                                               \
2877
    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
2878
    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
2879
    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
2880
    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
2881
    int cl = condl;                                             \
2882
    int ch = condh;                                             \
2883
                                                                \
2884
    update_fcr31();                                             \
2885
    if (cl)                                                     \
2886
        SET_FP_COND(cc, env->active_fpu);                       \
2887
    else                                                        \
2888
        CLEAR_FP_COND(cc, env->active_fpu);                     \
2889
    if (ch)                                                     \
2890
        SET_FP_COND(cc + 1, env->active_fpu);                   \
2891
    else                                                        \
2892
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
2893
}                                                               \
2894
void do_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
2895
{                                                               \
2896
    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
2897
    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
2898
    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
2899
    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
2900
    int cl = condl;                                             \
2901
    int ch = condh;                                             \
2902
                                                                \
2903
    update_fcr31();                                             \
2904
    if (cl)                                                     \
2905
        SET_FP_COND(cc, env->active_fpu);                       \
2906
    else                                                        \
2907
        CLEAR_FP_COND(cc, env->active_fpu);                     \
2908
    if (ch)                                                     \
2909
        SET_FP_COND(cc + 1, env->active_fpu);                   \
2910
    else                                                        \
2911
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
2912
}
2913

    
2914
/* NOTE: for the always-false conditions (f, sf) the comma operator makes
 * each condition evaluate to 0, but float32_is_unordered() is still
 * called for its invalid-exception side effect. */
FOP_COND_PS(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
/* The "signaling" variants below pass sig=1: quiet NaN operands also
 * raise the invalid exception (see float32_is_unordered).  The comma
 * operator keeps each condition false for sf while preserving the
 * side effect. */
FOP_COND_PS(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))