Statistics
| Branch: | Revision:

root / target-mips / op_helper.c @ ef5b2344

History | View | Annotate | Download (103.9 kB)

1
/*
2
 *  MIPS emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2004-2005 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <stdlib.h>
20
#include "cpu.h"
21
#include "dyngen-exec.h"
22

    
23
#include "host-utils.h"
24

    
25
#include "helper.h"
26

    
27
#if !defined(CONFIG_USER_ONLY)
28
#include "softmmu_exec.h"
29
#endif /* !defined(CONFIG_USER_ONLY) */
30

    
31
#ifndef CONFIG_USER_ONLY
32
static inline void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global);
33
#endif
34

    
35
/*****************************************************************************/
/* Exceptions processing helpers */

/* Raise a MIPS exception with an attached error code and longjmp back to
   the main execution loop via cpu_loop_exit(); never returns to the caller. */
void helper_raise_exception_err (uint32_t exception, int error_code)
{
#if 1
    /* Debug logging; exceptions >= 0x100 (internal markers) are not logged. */
    if (exception < 0x100)
        qemu_log("%s: %d %d\n", __func__, exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit(env);
}

/* Convenience wrapper: raise an exception with error code 0. */
void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
53

    
54
#if !defined(CONFIG_USER_ONLY)
/* Restore the guest CPU state from the host PC of a faulting instruction
   inside a translated block, so the exception is reported with a precise
   guest PC.  No-op if the host PC does not belong to any known TB. */
static void do_restore_state(uintptr_t pc)
{
    TranslationBlock *tb;

    tb = tb_find_pc (pc);
    if (tb) {
        cpu_restore_state(tb, env, pc);
    }
}
#endif
65

    
66
#if defined(CONFIG_USER_ONLY)
/* User mode: single flat address space, use the _raw accessors. */
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(target_ulong addr, int mem_idx)            \
{                                                                       \
    return (type) insn##_raw(addr);                                     \
}
#else
/* System mode: dispatch on the MMU index (0 = kernel, 1 = supervisor,
   anything else = user). */
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(target_ulong addr, int mem_idx)            \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: return (type) insn##_kernel(addr); break;                   \
    case 1: return (type) insn##_super(addr); break;                    \
    default:                                                            \
    case 2: return (type) insn##_user(addr); break;                     \
    }                                                                   \
}
#endif
HELPER_LD(lbu, ldub, uint8_t)
HELPER_LD(lw, ldl, int32_t)
#ifdef TARGET_MIPS64
HELPER_LD(ld, ldq, int64_t)
#endif
#undef HELPER_LD
91

    
92
#if defined(CONFIG_USER_ONLY)
/* User mode: single flat address space, use the _raw accessors. */
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
{                                                                       \
    insn##_raw(addr, val);                                              \
}
#else
/* System mode: dispatch on the MMU index (0 = kernel, 1 = supervisor,
   anything else = user). */
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: insn##_kernel(addr, val); break;                            \
    case 1: insn##_super(addr, val); break;                             \
    default:                                                            \
    case 2: insn##_user(addr, val); break;                              \
    }                                                                   \
}
#endif
HELPER_ST(sb, stb, uint8_t)
HELPER_ST(sw, stl, uint32_t)
#ifdef TARGET_MIPS64
HELPER_ST(sd, stq, uint64_t)
#endif
#undef HELPER_ST
117

    
118
/* CLO: count leading ones in the low 32 bits of arg1. */
target_ulong helper_clo (target_ulong arg1)
{
    return clo32(arg1);
}

/* CLZ: count leading zeros in the low 32 bits of arg1. */
target_ulong helper_clz (target_ulong arg1)
{
    return clz32(arg1);
}

#if defined(TARGET_MIPS64)
/* DCLO: count leading ones in the full 64-bit arg1. */
target_ulong helper_dclo (target_ulong arg1)
{
    return clo64(arg1);
}

/* DCLZ: count leading zeros in the full 64-bit arg1. */
target_ulong helper_dclz (target_ulong arg1)
{
    return clz64(arg1);
}
#endif /* TARGET_MIPS64 */
139

    
140
/* 64 bits arithmetic for 32 bits hosts */

/* Reassemble the 64-bit HI:LO accumulator from HI[0] (high word) and
   LO[0] (low word, taken unsigned) of the active thread context. */
static inline uint64_t get_HILO (void)
{
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
}

/* Split HILO back into HI[0]/LO[0] (each stored as a sign-extended
   32-bit value) and return the HI half — the result register value for
   the "hi"-returning vr54xx multiply variants. */
static inline target_ulong set_HIT0_LO(uint64_t HILO)
{
    target_ulong tmp;
    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    tmp = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
    return tmp;
}

/* As set_HIT0_LO() but return the LO half instead. */
static inline target_ulong set_HI_LOT0(uint64_t HILO)
{
    target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
    return tmp;
}
160

    
161
/* Multiplication variants of the vr54xx. */
/* All of these update the HI:LO accumulator; the set_HI_LOT0() variants
   return the LO half to the destination register, the set_HIT0_LO()
   variants return the HI half.  "u" suffixes are the unsigned forms. */

/* Negated signed 32x32->64 multiply, returns LO. */
target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
{
    return set_HI_LOT0(0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
}

/* Negated unsigned 32x32->64 multiply, returns LO. */
target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
{
    return set_HI_LOT0(0 - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}

/* Multiply-accumulate (signed), returns LO. */
target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
{
    return set_HI_LOT0((int64_t)get_HILO() + (int64_t)(int32_t)arg1 *
                                             (int64_t)(int32_t)arg2);
}

/* Multiply-accumulate (signed), returns HI. */
target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
{
    return set_HIT0_LO((int64_t)get_HILO() + (int64_t)(int32_t)arg1 *
                                             (int64_t)(int32_t)arg2);
}

/* Multiply-accumulate (unsigned), returns LO. */
target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
{
    return set_HI_LOT0((uint64_t)get_HILO() + (uint64_t)(uint32_t)arg1 *
                                              (uint64_t)(uint32_t)arg2);
}

/* Multiply-accumulate (unsigned), returns HI. */
target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
{
    return set_HIT0_LO((uint64_t)get_HILO() + (uint64_t)(uint32_t)arg1 *
                                              (uint64_t)(uint32_t)arg2);
}

/* Multiply-subtract (signed), returns LO. */
target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
{
    return set_HI_LOT0((int64_t)get_HILO() - (int64_t)(int32_t)arg1 *
                                             (int64_t)(int32_t)arg2);
}

/* Multiply-subtract (signed), returns HI. */
target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
{
    return set_HIT0_LO((int64_t)get_HILO() - (int64_t)(int32_t)arg1 *
                                             (int64_t)(int32_t)arg2);
}

/* Multiply-subtract (unsigned), returns LO. */
target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
{
    return set_HI_LOT0((uint64_t)get_HILO() - (uint64_t)(uint32_t)arg1 *
                                              (uint64_t)(uint32_t)arg2);
}

/* Multiply-subtract (unsigned), returns HI. */
target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
{
    return set_HIT0_LO((uint64_t)get_HILO() - (uint64_t)(uint32_t)arg1 *
                                              (uint64_t)(uint32_t)arg2);
}

/* Signed multiply, returns HI. */
target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
{
    return set_HIT0_LO((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
}

/* Unsigned multiply, returns HI. */
target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
{
    return set_HIT0_LO((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}

/* Negated signed multiply, returns HI. */
target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
{
    return set_HIT0_LO(0 - (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
}

/* Negated unsigned multiply, returns HI. */
target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
{
    return set_HIT0_LO(0 - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}
239

    
240
#ifdef TARGET_MIPS64
241
void helper_dmult (target_ulong arg1, target_ulong arg2)
242
{
243
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
244
}
245

    
246
void helper_dmultu (target_ulong arg1, target_ulong arg2)
247
{
248
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
249
}
250
#endif
251

    
252
#ifndef CONFIG_USER_ONLY
253

    
254
/* Translate a guest virtual address for an LL/SC access.  On failure
   cpu_mips_translate_address() has already set up the exception state,
   so we just longjmp out; cpu_loop_exit() does not return, which is why
   the "missing" return on that path is fine. */
static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
{
    target_phys_addr_t lladdr;

    lladdr = cpu_mips_translate_address(env, address, rw);

    if (lladdr == -1LL) {
        cpu_loop_exit(env);
    } else {
        return lladdr;
    }
}
266

    
267
/* Load-linked: record the translated physical address and the loaded
   value in env->lladdr / env->llval for the matching SC to compare. */
#define HELPER_LD_ATOMIC(name, insn)                                          \
target_ulong helper_##name(target_ulong arg, int mem_idx)                     \
{                                                                             \
    env->lladdr = do_translate_address(arg, 0);                               \
    env->llval = do_##insn(arg, mem_idx);                                     \
    return env->llval;                                                        \
}
HELPER_LD_ATOMIC(ll, lw)
#ifdef TARGET_MIPS64
HELPER_LD_ATOMIC(lld, ld)
#endif
#undef HELPER_LD_ATOMIC
279

    
280
/* Store-conditional: raise AdES on misalignment; succeed (return 1) only
   if the address matches the linked address and the memory still holds
   the linked value, otherwise return 0 without storing. */
#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
{                                                                             \
    target_long tmp;                                                          \
                                                                              \
    if (arg2 & almask) {                                                      \
        env->CP0_BadVAddr = arg2;                                             \
        helper_raise_exception(EXCP_AdES);                                    \
    }                                                                         \
    if (do_translate_address(arg2, 1) == env->lladdr) {                       \
        tmp = do_##ld_insn(arg2, mem_idx);                                    \
        if (tmp == env->llval) {                                              \
            do_##st_insn(arg2, arg1, mem_idx);                                \
            return 1;                                                         \
        }                                                                     \
    }                                                                         \
    return 0;                                                                 \
}
HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
#ifdef TARGET_MIPS64
HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
#endif
#undef HELPER_ST_ATOMIC
303
#endif
304

    
305
/* GET_LMASK(v): byte offset of address v within its aligned word,
   counted from the most-significant end (0..3); GET_OFFSET steps toward
   the less-significant bytes.  Both flip direction with endianness. */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif
312

    
313
/* LWL: load word left.  Merges the high-order bytes of the word
   containing arg2 into register value arg1, byte by byte.  The byte
   accesses must stay in this order so a fault is taken before any
   partial update becomes architecturally visible. */
target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    target_ulong tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);

    if (GET_LMASK(arg2) <= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) <= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) == 0) {
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00) | tmp;
    }
    /* Sign-extend for MIPS64 consistency with 32-bit loads. */
    return (int32_t)arg1;
}
336

    
337
/* LWR: load word right.  Merges the low-order bytes of the word
   containing arg2 into register value arg1; mirror image of LWL. */
target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    target_ulong tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFF00) | tmp;

    if (GET_LMASK(arg2) >= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) >= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) == 3) {
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
    }
    /* Sign-extend for MIPS64 consistency with 32-bit loads. */
    return (int32_t)arg1;
}
360

    
361
/* SWL: store word left.  Writes the high-order bytes of arg1 into the
   word containing address arg2, from arg2 toward the word boundary.
   How many bytes are stored depends on the alignment of arg2. */
void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    /* GET_LMASK is pure arithmetic on arg2, so it can be evaluated once. */
    target_ulong lmask = GET_LMASK(arg2);

    do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);

    if (lmask <= 2) {
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
    }

    if (lmask <= 1) {
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
    }

    if (lmask == 0) {
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
    }
}
374

    
375
/* SWR: store word right.  Writes the low-order bytes of arg1 into the
   word containing address arg2; mirror image of SWL. */
void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    /* GET_LMASK is pure arithmetic on arg2, so it can be evaluated once. */
    target_ulong lmask = GET_LMASK(arg2);

    do_sb(arg2, (uint8_t)arg1, mem_idx);

    if (lmask >= 1) {
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
    }

    if (lmask >= 2) {
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
    }

    if (lmask == 3) {
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
    }
}
388

    
389
#if defined(TARGET_MIPS64)
390
/* "half" load and stores.  We must do the memory access inline,
   or fault handling won't work.  */

/* GET_LMASK64(v): byte offset of v within its aligned doubleword,
   counted from the most-significant end (0..7); endian-dependent like
   GET_LMASK above. */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif
398

    
399
/* LDL: load doubleword left (64-bit analogue of LWL).  Byte access
   order must be preserved for precise fault handling. */
target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    uint64_t tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);

    if (GET_LMASK64(arg2) <= 6) {
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) <= 5) {
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) <= 4) {
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) <= 3) {
        tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) <= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) <= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    }

    if (GET_LMASK64(arg2) == 0) {
        tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
    }

    return arg1;
}
443

    
444
/* LDR: load doubleword right (64-bit analogue of LWR). */
target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    uint64_t tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;

    if (GET_LMASK64(arg2) >= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp  << 8);
    }

    if (GET_LMASK64(arg2) >= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) >= 3) {
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) >= 4) {
        tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) >= 5) {
        tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) >= 6) {
        tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) == 7) {
        tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
        arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
    }

    return arg1;
}
488

    
489
/* SDL: store doubleword left (64-bit analogue of SWL). */
void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);

    if (GET_LMASK64(arg2) <= 6)
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);

    if (GET_LMASK64(arg2) <= 5)
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);

    if (GET_LMASK64(arg2) <= 4)
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);

    if (GET_LMASK64(arg2) <= 3)
        do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK64(arg2) <= 2)
        do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK64(arg2) <= 1)
        do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);

    /* GET_LMASK64 is non-negative, so "<= 0" is equivalent to "== 0". */
    if (GET_LMASK64(arg2) <= 0)
        do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
}
514

    
515
/* SDR: store doubleword right (64-bit analogue of SWR). */
void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)arg1, mem_idx);

    if (GET_LMASK64(arg2) >= 1)
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK64(arg2) >= 2)
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK64(arg2) >= 3)
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK64(arg2) >= 4)
        do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);

    if (GET_LMASK64(arg2) >= 5)
        do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);

    if (GET_LMASK64(arg2) >= 6)
        do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);

    if (GET_LMASK64(arg2) == 7)
        do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
}
540
#endif /* TARGET_MIPS64 */
541

    
542
/* Register numbers targeted by the microMIPS LWM/SWM/LDM/SDM reglist:
   s0-s7 ($16-$23) and s8/fp ($30); bit 4 of the reglist adds ra ($31). */
static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };

/* LWM: load multiple 32-bit words starting at addr into the registers
   selected by reglist (low 4 bits = count into multiple_regs, bit 4 = ra),
   sign-extending each value. */
void helper_lwm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
/* In user mode "ldfun" is a macro aliasing the raw accessor; mem_idx is
   unused. */
#undef ldfun
#define ldfun ldl_raw
#else
    uint32_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldl_kernel; break;
    case 1: ldfun = ldl_super; break;
    default:
    case 2: ldfun = ldl_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = (target_long) ldfun(addr);
            addr += 4;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = (target_long) ldfun(addr);
    }
}
576

    
577
/* SWM: store multiple 32-bit words; register selection mirrors
   helper_lwm() above. */
void helper_swm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
/* In user mode "stfun" is a macro aliasing the raw accessor. */
#undef stfun
#define stfun stl_raw
#else
    void (*stfun)(target_ulong, uint32_t);

    switch (mem_idx)
    {
    case 0: stfun = stl_kernel; break;
    case 1: stfun = stl_super; break;
     default:
    case 2: stfun = stl_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 4;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}
609

    
610
#if defined(TARGET_MIPS64)
611
/* LDM: load multiple 64-bit doublewords; register selection mirrors
   helper_lwm(). */
void helper_ldm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
/* In user mode "ldfun" is a macro aliasing the raw accessor. */
#undef ldfun
#define ldfun ldq_raw
#else
    uint64_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldq_kernel; break;
    case 1: ldfun = ldq_super; break;
    default:
    case 2: ldfun = ldq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = ldfun(addr);
            addr += 8;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = ldfun(addr);
    }
}
643

    
644
/* SDM: store multiple 64-bit doublewords; register selection mirrors
   helper_lwm(). */
void helper_sdm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
/* In user mode "stfun" is a macro aliasing the raw accessor. */
#undef stfun
#define stfun stq_raw
#else
    void (*stfun)(target_ulong, uint64_t);

    switch (mem_idx)
    {
    case 0: stfun = stq_kernel; break;
    case 1: stfun = stq_super; break;
     default:
    case 2: stfun = stq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 8;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}
676
#endif
677

    
678
#ifndef CONFIG_USER_ONLY
679
/* SMP helpers.  */
680
/* A VPE that is halted while otherwise active is waiting for an
   interrupt (wfi). */
static int mips_vpe_is_wfi(CPUMIPSState *c)
{
    if (!c->halted) {
        return 0;
    }
    return mips_vpe_active(c);
}
686

    
687
/* Request a wake-up for VPE c. */
static inline void mips_vpe_wake(CPUMIPSState *c)
{
    /* Dont set ->halted = 0 directly, let it be done via cpu_has_work
       because there might be other conditions that state that c should
       be sleeping.  */
    cpu_interrupt(c, CPU_INTERRUPT_WAKE);
}

/* Put VPE c to sleep and cancel any pending wake request. */
static inline void mips_vpe_sleep(CPUMIPSState *c)
{
    /* The VPE was shut off, really go to bed.
       Reset any old _WAKE requests.  */
    c->halted = 1;
    cpu_reset_interrupt(c, CPU_INTERRUPT_WAKE);
}
702

    
703
/* Wake the VPE owning TC tc, if the VPE is runnable and not in wfi. */
static inline void mips_tc_wake(CPUMIPSState *c, int tc)
{
    /* FIXME: TC reschedule.  */
    if (mips_vpe_active(c) && !mips_vpe_is_wfi(c)) {
        mips_vpe_wake(c);
    }
}

/* Put the VPE owning TC tc to sleep if it is no longer active. */
static inline void mips_tc_sleep(CPUMIPSState *c, int tc)
{
    /* FIXME: TC reschedule.  */
    if (!mips_vpe_active(c)) {
        mips_vpe_sleep(c);
    }
}
718

    
719
/* tc should point to an int with the value of the global TC index.
   This function will transform it into a local index within the
   returned CPUMIPSState.

   FIXME: This code assumes that all VPEs have the same number of TCs,
          which depends on runtime setup. Can probably be fixed by
          walking the list of CPUMIPSStates.  */
static CPUMIPSState *mips_cpu_map_tc(int *tc)
{
    CPUMIPSState *other;
    int vpe_idx, nr_threads = env->nr_threads;
    int tc_idx = *tc;

    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
        /* Not allowed to address other CPUs.  */
        *tc = env->current_tc;
        return env;
    }

    /* NOTE(review): divides by nr_threads — assumes nr_threads > 0;
       presumably guaranteed by CPU init, verify.  */
    vpe_idx = tc_idx / nr_threads;
    *tc = tc_idx % nr_threads;
    other = qemu_get_cpu(vpe_idx);
    /* Fall back to the current CPU if the VPE index is out of range. */
    return other ? other : env;
}
743

    
744
/* The per VPE CP0_Status register shares some fields with the per TC
   CP0_TCStatus registers. These fields are wired to the same registers,
   so changes to either of them should be reflected on both registers.

   Also, EntryHi shares the bottom 8 bit ASID with TCStauts.

   These helper call synchronizes the regs for a given cpu.  */

/* Called for updates to CP0_Status.  Propagates the CU/MX/KSU fields of
   cpu's Status plus the current ASID into TC tc's TCStatus. */
static void sync_c0_status(CPUMIPSState *cpu, int tc)
{
    int32_t tcstatus, *tcst;
    uint32_t v = cpu->CP0_Status;
    uint32_t cu, mx, asid, ksu;
    /* TCStatus fields that mirror Status/EntryHi. */
    uint32_t mask = ((1 << CP0TCSt_TCU3)
                       | (1 << CP0TCSt_TCU2)
                       | (1 << CP0TCSt_TCU1)
                       | (1 << CP0TCSt_TCU0)
                       | (1 << CP0TCSt_TMX)
                       | (3 << CP0TCSt_TKSU)
                       | (0xff << CP0TCSt_TASID));

    cu = (v >> CP0St_CU0) & 0xf;
    mx = (v >> CP0St_MX) & 0x1;
    ksu = (v >> CP0St_KSU) & 0x3;
    /* NOTE(review): reads the *global* env's EntryHi even when cpu is a
       different VPE (mttc0 path) — looks intentional for this code base,
       but verify against the MT ASE spec.  */
    asid = env->CP0_EntryHi & 0xff;

    tcstatus = cu << CP0TCSt_TCU0;
    tcstatus |= mx << CP0TCSt_TMX;
    tcstatus |= ksu << CP0TCSt_TKSU;
    tcstatus |= asid;

    /* The active TC's state lives in active_tc, not in the tcs[] array. */
    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    *tcst &= ~mask;
    *tcst |= tcstatus;
    compute_hflags(cpu);
}
786

    
787
/* Called for updates to CP0_TCStatus.  */
788
static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc, target_ulong v)
789
{
790
    uint32_t status;
791
    uint32_t tcu, tmx, tasid, tksu;
792
    uint32_t mask = ((1 << CP0St_CU3)
793
                       | (1 << CP0St_CU2)
794
                       | (1 << CP0St_CU1)
795
                       | (1 << CP0St_CU0)
796
                       | (1 << CP0St_MX)
797
                       | (3 << CP0St_KSU));
798

    
799
    tcu = (v >> CP0TCSt_TCU0) & 0xf;
800
    tmx = (v >> CP0TCSt_TMX) & 0x1;
801
    tasid = v & 0xff;
802
    tksu = (v >> CP0TCSt_TKSU) & 0x3;
803

    
804
    status = tcu << CP0St_CU0;
805
    status |= tmx << CP0St_MX;
806
    status |= tksu << CP0St_KSU;
807

    
808
    cpu->CP0_Status &= ~mask;
809
    cpu->CP0_Status |= status;
810

    
811
    /* Sync the TASID with EntryHi.  */
812
    cpu->CP0_EntryHi &= ~0xff;
813
    cpu->CP0_EntryHi = tasid;
814

    
815
    compute_hflags(cpu);
816
}
817

    
818
/* Called for updates to CP0_EntryHi.  Copies the ASID (low 8 bits of
   EntryHi) into TC tc's TCStatus TASID field. */
static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
{
    int32_t *tcst;
    uint32_t asid, v = cpu->CP0_EntryHi;

    asid = v & 0xff;

    /* The active TC's state lives in active_tc, not in the tcs[] array. */
    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    /* TASID occupies the low 8 bits of TCStatus. */
    *tcst &= ~0xff;
    *tcst |= asid;
}
835

    
836
/* CP0 helpers */

/* Read the MVP (multi-VPE) control/configuration registers, which are
   shared between VPEs via env->mvp. */
target_ulong helper_mfc0_mvpcontrol (void)
{
    return env->mvp->CP0_MVPControl;
}

target_ulong helper_mfc0_mvpconf0 (void)
{
    return env->mvp->CP0_MVPConf0;
}

target_ulong helper_mfc0_mvpconf1 (void)
{
    return env->mvp->CP0_MVPConf1;
}
851

    
852
/* Read CP0 Random (TLB replacement index), sign-extended to target_ulong. */
target_ulong helper_mfc0_random (void)
{
    return (int32_t)cpu_mips_get_random(env);
}
856

    
857
/* Per-TC CP0 register reads.  The helper_mfc0_* variants read the
   current TC; the helper_mftc0_* variants read the TC selected by
   VPEControl.TargTC (possibly on another VPE via mips_cpu_map_tc()).
   In all mftc0 variants, the targeted TC's state lives in active_tc
   when it is that CPU's current TC, otherwise in its tcs[] array. */

target_ulong helper_mfc0_tcstatus (void)
{
    return env->active_tc.CP0_TCStatus;
}

target_ulong helper_mftc0_tcstatus(void)
{
    /* TargTC occupies the low bits of VPEControl, so no down-shift is
       needed after masking. */
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCStatus;
    else
        return other->tcs[other_tc].CP0_TCStatus;
}

target_ulong helper_mfc0_tcbind (void)
{
    return env->active_tc.CP0_TCBind;
}

target_ulong helper_mftc0_tcbind(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCBind;
    else
        return other->tcs[other_tc].CP0_TCBind;
}

/* TCRestart is the TC's restart PC. */
target_ulong helper_mfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

target_ulong helper_mftc0_tcrestart(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.PC;
    else
        return other->tcs[other_tc].PC;
}

target_ulong helper_mfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_mftc0_tchalt(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCHalt;
    else
        return other->tcs[other_tc].CP0_TCHalt;
}

target_ulong helper_mfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_mftc0_tccontext(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCContext;
    else
        return other->tcs[other_tc].CP0_TCContext;
}

target_ulong helper_mfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_mftc0_tcschedule(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCSchedule;
    else
        return other->tcs[other_tc].CP0_TCSchedule;
}

target_ulong helper_mfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_mftc0_tcschefback(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCScheFBack;
    else
        return other->tcs[other_tc].CP0_TCScheFBack;
}
968

    
969
/* Read CP0 Count (timer), sign-extended to target_ulong. */
target_ulong helper_mfc0_count (void)
{
    return (int32_t)cpu_mips_get_count(env);
}
973

    
974
/* Read EntryHi of the VPE owning the TC selected by VPEControl.TargTC
   (EntryHi is per-VPE, so no per-TC distinction is needed). */
target_ulong helper_mftc0_entryhi(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    return other->CP0_EntryHi;
}
981

    
982
target_ulong helper_mftc0_cause(void)
983
{
984
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
985
    int32_t tccause;
986
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
987

    
988
    if (other_tc == other->current_tc) {
989
        tccause = other->CP0_Cause;
990
    } else {
991
        tccause = other->CP0_Cause;
992
    }
993

    
994
    return tccause;
995
}
996

    
997
/* Read Status of the VPE owning the TC selected by VPEControl.TargTC
   (Status is per-VPE). */
target_ulong helper_mftc0_status(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    return other->CP0_Status;
}
1004

    
1005
/* Read CP0 LLAddr: the linked physical address shifted right per the
   CPU's LLAddr shift amount, sign-extended to 32 bits. */
target_ulong helper_mfc0_lladdr (void)
{
    return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
}

/* Read WatchLo[sel], sign-extended to 32 bits. */
target_ulong helper_mfc0_watchlo (uint32_t sel)
{
    return (int32_t)env->CP0_WatchLo[sel];
}

/* Read WatchHi[sel] (stored as a 32-bit value). */
target_ulong helper_mfc0_watchhi (uint32_t sel)
{
    return env->CP0_WatchHi[sel];
}
1019

    
1020
target_ulong helper_mfc0_debug (void)
1021
{
1022
    target_ulong t0 = env->CP0_Debug;
1023
    if (env->hflags & MIPS_HFLAG_DM)
1024
        t0 |= 1 << CP0DB_DM;
1025

    
1026
    return t0;
1027
}
1028

    
1029
/* Read Debug for the TC selected by VPEControl.TargTC, combining the
   per-VPE Debug register with the per-TC SSt/Halt bits. */
target_ulong helper_mftc0_debug(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        tcstatus = other->active_tc.CP0_Debug_tcstatus;
    else
        tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;

    /* XXX: Might be wrong, check with EJTAG spec. */
    return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
1044

    
1045
#if defined(TARGET_MIPS64)
/* 64-bit (dmfc0) CP0 reads: same sources as the 32-bit variants but
   without sign extension of the low word.  */
target_ulong helper_dmfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

target_ulong helper_dmfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_dmfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_dmfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_dmfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_dmfc0_lladdr (void)
{
    return env->lladdr >> env->CP0_LLAddr_shift;
}

target_ulong helper_dmfc0_watchlo (uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}
#endif /* TARGET_MIPS64 */
/* Write CP0 Index: keep the P bit, mask the index to the next power of
   two covering the TLB size.  */
void helper_mtc0_index (target_ulong arg1)
{
    int num = 1;
    unsigned int tmp = env->tlb->nb_tlb;

    do {
        tmp >>= 1;
        num <<= 1;
    } while (tmp);
    env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
}

/* Write MVPControl; writability of CPA/VPC/EVP depends on MVP, STLB on VPC.  */
void helper_mtc0_mvpcontrol (target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
                (1 << CP0MVPCo_EVP);
    }
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0MVPCo_STLB);
    }
    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);

    // TODO: Enable/disable shared TLB, enable/disable VPEs.

    env->mvp->CP0_MVPControl = newval;
}

/* Write this VPE's VPEControl (YSI/GSI/TE/TargTC writable).  */
void helper_mtc0_vpecontrol (target_ulong arg1)
{
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);

    /* Yield scheduler intercept not implemented. */
    /* Gating storage scheduler intercept not implemented. */

    // TODO: Enable/disable TCs.

    env->CP0_VPEControl = newval;
}

/* Cross-TC write of VPEControl for the VPE owning the target TC.  */
void helper_mttc0_vpecontrol(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);

    /* TODO: Enable/disable TCs.  */

    other->CP0_VPEControl = newval;
}
target_ulong helper_mftc0_vpecontrol(void)
1145
{
1146
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1147
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1148
    /* FIXME: Mask away return zero on read bits.  */
1149
    return other->CP0_VPEControl;
1150
}
1151

    
1152
target_ulong helper_mftc0_vpeconf0(void)
1153
{
1154
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1155
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1156

    
1157
    return other->CP0_VPEConf0;
1158
}
1159

    
1160
void helper_mtc0_vpeconf0 (target_ulong arg1)
1161
{
1162
    uint32_t mask = 0;
1163
    uint32_t newval;
1164

    
1165
    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
1166
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
1167
            mask |= (0xff << CP0VPEC0_XTC);
1168
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1169
    }
1170
    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1171

    
1172
    // TODO: TC exclusive handling due to ERL/EXL.
1173

    
1174
    env->CP0_VPEConf0 = newval;
1175
}
1176

    
1177
void helper_mttc0_vpeconf0(target_ulong arg1)
1178
{
1179
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1180
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1181
    uint32_t mask = 0;
1182
    uint32_t newval;
1183

    
1184
    mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1185
    newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1186

    
1187
    /* TODO: TC exclusive handling due to ERL/EXL.  */
1188
    other->CP0_VPEConf0 = newval;
1189
}
1190

    
1191
void helper_mtc0_vpeconf1 (target_ulong arg1)
1192
{
1193
    uint32_t mask = 0;
1194
    uint32_t newval;
1195

    
1196
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1197
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
1198
                (0xff << CP0VPEC1_NCP1);
1199
    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
1200

    
1201
    /* UDI not implemented. */
1202
    /* CP2 not implemented. */
1203

    
1204
    // TODO: Handle FPU (CP1) binding.
1205

    
1206
    env->CP0_VPEConf1 = newval;
1207
}
1208

    
1209
/* YQMask write: no yield qualifier inputs, so the register is pinned to 0.  */
void helper_mtc0_yqmask (target_ulong arg1)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}

/* VPEOpt write: only the low 16 bits are kept.  */
void helper_mtc0_vpeopt (target_ulong arg1)
{
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}

/* EntryLo0 write.  */
void helper_mtc0_entrylo0 (target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
}

/* TCStatus write for the current TC, honoring the writable-bit mask.  */
void helper_mtc0_tcstatus (target_ulong arg1)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);

    env->active_tc.CP0_TCStatus = newval;
    sync_c0_tcstatus(env, env->current_tc, newval);
}

/* Cross-TC TCStatus write (no writable-bit masking here).  */
void helper_mttc0_tcstatus (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCStatus = arg1;
    } else {
        other->tcs[other_tc].CP0_TCStatus = arg1;
    }
    sync_c0_tcstatus(other, other_tc, arg1);
}
/* TCBind write for the current TC; CurVPE is writable only in
   configuration state.  */
void helper_mtc0_tcbind (target_ulong arg1)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0TCBd_CurVPE);
    }
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
    env->active_tc.CP0_TCBind = newval;
}

/* Cross-TC TCBind write.  */
void helper_mttc0_tcbind (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0TCBd_CurVPE);
    }
    if (other_tc == other->current_tc) {
        newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
        other->active_tc.CP0_TCBind = newval;
    } else {
        newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
        other->tcs[other_tc].CP0_TCBind = newval;
    }
}

/* TCRestart write: set the restart PC and clear TDS; drops any LL bit.  */
void helper_mtc0_tcrestart (target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->lladdr = 0ULL;
    /* MIPS16 not implemented. */
}

/* Cross-TC TCRestart write.  */
void helper_mttc0_tcrestart (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.PC = arg1;
        other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    } else {
        other->tcs[other_tc].PC = arg1;
        other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    }
    other->lladdr = 0ULL;
    /* MIPS16 not implemented. */
}

/* TCHalt write for the current TC: bit 0 selects sleep vs. wake.  */
void helper_mtc0_tchalt (target_ulong arg1)
{
    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    // TODO: Halt TC / Restart (if allocated+active) TC.
    if (env->active_tc.CP0_TCHalt & 1) {
        mips_tc_sleep(env, env->current_tc);
    } else {
        mips_tc_wake(env, env->current_tc);
    }
}

/* Cross-TC TCHalt write.  */
void helper_mttc0_tchalt (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    // TODO: Halt TC / Restart (if allocated+active) TC.

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCHalt = arg1;
    } else {
        other->tcs[other_tc].CP0_TCHalt = arg1;
    }

    if (arg1 & 1) {
        mips_tc_sleep(other, other_tc);
    } else {
        mips_tc_wake(other, other_tc);
    }
}
/* TCContext write for the current TC.  */
void helper_mtc0_tccontext (target_ulong arg1)
{
    env->active_tc.CP0_TCContext = arg1;
}

/* Cross-TC TCContext write.  */
void helper_mttc0_tccontext (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCContext = arg1;
    } else {
        other->tcs[other_tc].CP0_TCContext = arg1;
    }
}

/* TCSchedule write for the current TC.  */
void helper_mtc0_tcschedule (target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}

/* Cross-TC TCSchedule write.  */
void helper_mttc0_tcschedule (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCSchedule = arg1;
    } else {
        other->tcs[other_tc].CP0_TCSchedule = arg1;
    }
}

/* TCScheFBack write for the current TC.  */
void helper_mtc0_tcschefback (target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}

/* Cross-TC TCScheFBack write.  */
void helper_mttc0_tcschefback (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCScheFBack = arg1;
    } else {
        other->tcs[other_tc].CP0_TCScheFBack = arg1;
    }
}

/* EntryLo1 write.  */
void helper_mtc0_entrylo1 (target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
}
/* Context write: only the PTEBase field (bits above 22) is writable.  */
void helper_mtc0_context (target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}

/* PageMask write.  */
void helper_mtc0_pagemask (target_ulong arg1)
{
    /* 1k pages not implemented */
    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
}

/* PageGrain write: all controlled features unimplemented, register
   stays zero.  */
void helper_mtc0_pagegrain (target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = 0;
}

/* Wired write, clamped to the TLB size.  */
void helper_mtc0_wired (target_ulong arg1)
{
    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
}

/* SRSConf0-4 writes: sticky-set the implemented writable bits.  */
void helper_mtc0_srsconf0 (target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}

void helper_mtc0_srsconf1 (target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}

void helper_mtc0_srsconf2 (target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}

void helper_mtc0_srsconf3 (target_ulong arg1)
{
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}

void helper_mtc0_srsconf4 (target_ulong arg1)
{
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}

/* HWREna write: four rdhwr-enable bits.  */
void helper_mtc0_hwrena (target_ulong arg1)
{
    env->CP0_HWREna = arg1 & 0x0000000F;
}

/* Count write goes through the timer subsystem.  */
void helper_mtc0_count (target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}
/* EntryHi write: VPN2 + ASID; flushes qemu's TLB on ASID change.  */
void helper_mtc0_entryhi (target_ulong arg1)
{
    target_ulong old, val;

    /* 1k pages not implemented */
    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
#if defined(TARGET_MIPS64)
    val &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB.  */
    if ((old & 0xFF) != (val & 0xFF)) {
        cpu_mips_tlb_flush(env, 1);
    }
}

/* Cross-TC EntryHi write.  */
void helper_mttc0_entryhi(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    other->CP0_EntryHi = arg1;
    sync_c0_entryhi(other, other_tc);
}

/* Compare write goes through the timer subsystem.  */
void helper_mtc0_compare (target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}

/* Status write: apply the writable-bit mask, resync MT state or
   recompute hflags, and trace the transition when logging.  */
void helper_mtc0_status (target_ulong arg1)
{
    uint32_t val, old;
    uint32_t mask = env->CP0_Status_rw_bitmask;

    val = arg1 & mask;
    old = env->CP0_Status;
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_status(env, env->current_tc);
    } else {
        compute_hflags(env);
    }

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
                env->CP0_Cause);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}

/* Cross-TC Status write (fixed mask rather than the per-CPU bitmask).  */
void helper_mttc0_status(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    other->CP0_Status = arg1 & ~0xf1000018;
    sync_c0_status(other, other_tc);
}

/* IntCtl write.  */
void helper_mtc0_intctl (target_ulong arg1)
{
    /* vectored interrupts not implemented, no performance counters. */
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
}

/* SRSCtl write: ESS and PSS fields only.  */
void helper_mtc0_srsctl (target_ulong arg1)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);

    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}
/* Shared Cause-write logic: masks writable bits, toggles the count
   timer on DC changes, and raises/clears software interrupts IP0/IP1.  */
static void mtc0_cause(CPUMIPSState *cpu, target_ulong arg1)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = cpu->CP0_Cause;
    int i;

    if (cpu->insn_flags & ISA_MIPS32R2) {
        mask |= 1 << CP0Ca_DC;
    }

    cpu->CP0_Cause = (cpu->CP0_Cause & ~mask) | (arg1 & mask);

    if ((old ^ cpu->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (cpu->CP0_Cause & (1 << CP0Ca_DC)) {
            cpu_mips_stop_count(cpu);
        } else {
            cpu_mips_start_count(cpu);
        }
    }

    /* Set/reset software interrupts */
    for (i = 0 ; i < 2 ; i++) {
        if ((old ^ cpu->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(cpu, i, cpu->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}

/* Cause write for the current CPU.  */
void helper_mtc0_cause(target_ulong arg1)
{
    mtc0_cause(env, arg1);
}

/* Cross-TC Cause write.  */
void helper_mttc0_cause(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    mtc0_cause(other, arg1);
}
target_ulong helper_mftc0_epc(void)
1574
{
1575
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1576
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1577

    
1578
    return other->CP0_EPC;
1579
}
1580

    
1581
target_ulong helper_mftc0_ebase(void)
1582
{
1583
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1584
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1585

    
1586
    return other->CP0_EBase;
1587
}
1588

    
1589
void helper_mtc0_ebase (target_ulong arg1)
1590
{
1591
    /* vectored interrupts not implemented */
1592
    env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
1593
}
1594

    
1595
void helper_mttc0_ebase(target_ulong arg1)
1596
{
1597
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1598
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1599
    other->CP0_EBase = (other->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
1600
}
1601

    
1602
target_ulong helper_mftc0_configx(target_ulong idx)
1603
{
1604
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1605
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1606

    
1607
    switch (idx) {
1608
    case 0: return other->CP0_Config0;
1609
    case 1: return other->CP0_Config1;
1610
    case 2: return other->CP0_Config2;
1611
    case 3: return other->CP0_Config3;
1612
    /* 4 and 5 are reserved.  */
1613
    case 6: return other->CP0_Config6;
1614
    case 7: return other->CP0_Config7;
1615
    default:
1616
        break;
1617
    }
1618
    return 0;
1619
}
1620

    
1621
void helper_mtc0_config0 (target_ulong arg1)
1622
{
1623
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1624
}
1625

    
1626
void helper_mtc0_config2 (target_ulong arg1)
1627
{
1628
    /* tertiary/secondary caches not implemented */
1629
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1630
}
1631

    
1632
void helper_mtc0_lladdr (target_ulong arg1)
1633
{
1634
    target_long mask = env->CP0_LLAddr_rw_bitmask;
1635
    arg1 = arg1 << env->CP0_LLAddr_shift;
1636
    env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
1637
}
1638

    
1639
void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
1640
{
1641
    /* Watch exceptions for instructions, data loads, data stores
1642
       not implemented. */
1643
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1644
}
1645

    
1646
void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
1647
{
1648
    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
1649
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
1650
}
1651

    
1652
void helper_mtc0_xcontext (target_ulong arg1)
1653
{
1654
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1655
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1656
}
1657

    
1658
void helper_mtc0_framemask (target_ulong arg1)
1659
{
1660
    env->CP0_Framemask = arg1; /* XXX */
1661
}
1662

    
1663
void helper_mtc0_debug (target_ulong arg1)
1664
{
1665
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1666
    if (arg1 & (1 << CP0DB_DM))
1667
        env->hflags |= MIPS_HFLAG_DM;
1668
    else
1669
        env->hflags &= ~MIPS_HFLAG_DM;
1670
}
1671

    
1672
void helper_mttc0_debug(target_ulong arg1)
1673
{
1674
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1675
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1676
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1677

    
1678
    /* XXX: Might be wrong, check with EJTAG spec. */
1679
    if (other_tc == other->current_tc)
1680
        other->active_tc.CP0_Debug_tcstatus = val;
1681
    else
1682
        other->tcs[other_tc].CP0_Debug_tcstatus = val;
1683
    other->CP0_Debug = (other->CP0_Debug &
1684
                     ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1685
                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1686
}
1687

    
1688
void helper_mtc0_performance0 (target_ulong arg1)
1689
{
1690
    env->CP0_Performance0 = arg1 & 0x000007ff;
1691
}
1692

    
1693
void helper_mtc0_taglo (target_ulong arg1)
1694
{
1695
    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1696
}
1697

    
1698
void helper_mtc0_datalo (target_ulong arg1)
1699
{
1700
    env->CP0_DataLo = arg1; /* XXX */
1701
}
1702

    
1703
void helper_mtc0_taghi (target_ulong arg1)
1704
{
1705
    env->CP0_TagHi = arg1; /* XXX */
1706
}
1707

    
1708
void helper_mtc0_datahi (target_ulong arg1)
1709
{
1710
    env->CP0_DataHi = arg1; /* XXX */
1711
}
1712

    
1713
/* MIPS MT functions */
1714
target_ulong helper_mftgpr(uint32_t sel)
1715
{
1716
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1717
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1718

    
1719
    if (other_tc == other->current_tc)
1720
        return other->active_tc.gpr[sel];
1721
    else
1722
        return other->tcs[other_tc].gpr[sel];
1723
}
1724

    
1725
target_ulong helper_mftlo(uint32_t sel)
1726
{
1727
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1728
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1729

    
1730
    if (other_tc == other->current_tc)
1731
        return other->active_tc.LO[sel];
1732
    else
1733
        return other->tcs[other_tc].LO[sel];
1734
}
1735

    
1736
target_ulong helper_mfthi(uint32_t sel)
1737
{
1738
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1739
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1740

    
1741
    if (other_tc == other->current_tc)
1742
        return other->active_tc.HI[sel];
1743
    else
1744
        return other->tcs[other_tc].HI[sel];
1745
}
1746

    
1747
target_ulong helper_mftacx(uint32_t sel)
1748
{
1749
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1750
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1751

    
1752
    if (other_tc == other->current_tc)
1753
        return other->active_tc.ACX[sel];
1754
    else
1755
        return other->tcs[other_tc].ACX[sel];
1756
}
1757

    
1758
target_ulong helper_mftdsp(void)
1759
{
1760
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1761
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1762

    
1763
    if (other_tc == other->current_tc)
1764
        return other->active_tc.DSPControl;
1765
    else
1766
        return other->tcs[other_tc].DSPControl;
1767
}
1768

    
1769
void helper_mttgpr(target_ulong arg1, uint32_t sel)
1770
{
1771
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1772
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1773

    
1774
    if (other_tc == other->current_tc)
1775
        other->active_tc.gpr[sel] = arg1;
1776
    else
1777
        other->tcs[other_tc].gpr[sel] = arg1;
1778
}
1779

    
1780
void helper_mttlo(target_ulong arg1, uint32_t sel)
1781
{
1782
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1783
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1784

    
1785
    if (other_tc == other->current_tc)
1786
        other->active_tc.LO[sel] = arg1;
1787
    else
1788
        other->tcs[other_tc].LO[sel] = arg1;
1789
}
1790

    
1791
void helper_mtthi(target_ulong arg1, uint32_t sel)
1792
{
1793
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1794
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1795

    
1796
    if (other_tc == other->current_tc)
1797
        other->active_tc.HI[sel] = arg1;
1798
    else
1799
        other->tcs[other_tc].HI[sel] = arg1;
1800
}
1801

    
1802
void helper_mttacx(target_ulong arg1, uint32_t sel)
1803
{
1804
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1805
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1806

    
1807
    if (other_tc == other->current_tc)
1808
        other->active_tc.ACX[sel] = arg1;
1809
    else
1810
        other->tcs[other_tc].ACX[sel] = arg1;
1811
}
1812

    
1813
void helper_mttdsp(target_ulong arg1)
1814
{
1815
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1816
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1817

    
1818
    if (other_tc == other->current_tc)
1819
        other->active_tc.DSPControl = arg1;
1820
    else
1821
        other->tcs[other_tc].DSPControl = arg1;
1822
}
1823

    
1824
/* MIPS MT functions */
1825
target_ulong helper_dmt(void)
1826
{
1827
    // TODO
1828
     return 0;
1829
}
1830

    
1831
target_ulong helper_emt(void)
1832
{
1833
    // TODO
1834
    return 0;
1835
}
1836

    
1837
target_ulong helper_dvpe(void)
1838
{
1839
    CPUMIPSState *other_cpu = first_cpu;
1840
    target_ulong prev = env->mvp->CP0_MVPControl;
1841

    
1842
    do {
1843
        /* Turn off all VPEs except the one executing the dvpe.  */
1844
        if (other_cpu != env) {
1845
            other_cpu->mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
1846
            mips_vpe_sleep(other_cpu);
1847
        }
1848
        other_cpu = other_cpu->next_cpu;
1849
    } while (other_cpu);
1850
    return prev;
1851
}
1852

    
1853
target_ulong helper_evpe(void)
1854
{
1855
    CPUMIPSState *other_cpu = first_cpu;
1856
    target_ulong prev = env->mvp->CP0_MVPControl;
1857

    
1858
    do {
1859
        if (other_cpu != env
1860
           /* If the VPE is WFI, don't disturb its sleep.  */
1861
           && !mips_vpe_is_wfi(other_cpu)) {
1862
            /* Enable the VPE.  */
1863
            other_cpu->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
1864
            mips_vpe_wake(other_cpu); /* And wake it up.  */
1865
        }
1866
        other_cpu = other_cpu->next_cpu;
1867
    } while (other_cpu);
1868
    return prev;
1869
}
1870
#endif /* !CONFIG_USER_ONLY */
1871

    
1872
void helper_fork(target_ulong arg1, target_ulong arg2)
1873
{
1874
    // arg1 = rt, arg2 = rs
1875
    arg1 = 0;
1876
    // TODO: store to TC register
1877
}
1878

    
1879
/* YIELD: negative arg requests a qualified yield (may raise a thread
   exception), zero would deallocate the TC, positive waits on yield
   qualifiers.  Scheduling itself is not implemented; returns YQMask.  */
target_ulong helper_yield(target_ulong arg)
{
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                helper_raise_exception(EXCP_THREAD);
            }
        }
    } else if (arg1 == 0) {
        if (0 /* TODO: TC underflow */) {
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            helper_raise_exception(EXCP_THREAD);
        } else {
            // TODO: Deallocate TC
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        helper_raise_exception(EXCP_THREAD);
    }
    return env->CP0_YQMask;
}
#ifndef CONFIG_USER_ONLY
1910
/* TLB management */
1911
static void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global)
1912
{
1913
    /* Flush qemu's TLB and discard all shadowed entries.  */
1914
    tlb_flush (env, flush_global);
1915
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
1916
}
1917

    
1918
static void r4k_mips_tlb_flush_extra (CPUMIPSState *env, int first)
1919
{
1920
    /* Discard entries from env->tlb[first] onwards.  */
1921
    while (env->tlb->tlb_in_use > first) {
1922
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
1923
    }
1924
}
1925

    
1926
static void r4k_fill_tlb (int idx)
1927
{
1928
    r4k_tlb_t *tlb;
1929

    
1930
    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1931
    tlb = &env->tlb->mmu.r4k.tlb[idx];
1932
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
1933
#if defined(TARGET_MIPS64)
1934
    tlb->VPN &= env->SEGMask;
1935
#endif
1936
    tlb->ASID = env->CP0_EntryHi & 0xFF;
1937
    tlb->PageMask = env->CP0_PageMask;
1938
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
1939
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1940
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1941
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1942
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1943
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1944
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1945
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1946
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1947
}
1948

    
1949
void r4k_helper_tlbwi (void)
1950
{
1951
    int idx;
1952

    
1953
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
1954

    
1955
    /* Discard cached TLB entries.  We could avoid doing this if the
1956
       tlbwi is just upgrading access permissions on the current entry;
1957
       that might be a further win.  */
1958
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
1959

    
1960
    r4k_invalidate_tlb(env, idx, 0);
1961
    r4k_fill_tlb(idx);
1962
}
1963

    
1964
void r4k_helper_tlbwr (void)
1965
{
1966
    int r = cpu_mips_get_random(env);
1967

    
1968
    r4k_invalidate_tlb(env, r, 1);
1969
    r4k_fill_tlb(r);
1970
}
1971

    
1972
void r4k_helper_tlbp (void)
1973
{
1974
    r4k_tlb_t *tlb;
1975
    target_ulong mask;
1976
    target_ulong tag;
1977
    target_ulong VPN;
1978
    uint8_t ASID;
1979
    int i;
1980

    
1981
    ASID = env->CP0_EntryHi & 0xFF;
1982
    for (i = 0; i < env->tlb->nb_tlb; i++) {
1983
        tlb = &env->tlb->mmu.r4k.tlb[i];
1984
        /* 1k pages are not supported. */
1985
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1986
        tag = env->CP0_EntryHi & ~mask;
1987
        VPN = tlb->VPN & ~mask;
1988
        /* Check ASID, virtual page number & size */
1989
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1990
            /* TLB match */
1991
            env->CP0_Index = i;
1992
            break;
1993
        }
1994
    }
1995
    if (i == env->tlb->nb_tlb) {
1996
        /* No match.  Discard any shadow entries, if any of them match.  */
1997
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
1998
            tlb = &env->tlb->mmu.r4k.tlb[i];
1999
            /* 1k pages are not supported. */
2000
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
2001
            tag = env->CP0_EntryHi & ~mask;
2002
            VPN = tlb->VPN & ~mask;
2003
            /* Check ASID, virtual page number & size */
2004
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
2005
                r4k_mips_tlb_flush_extra (env, i);
2006
                break;
2007
            }
2008
        }
2009

    
2010
        env->CP0_Index |= 0x80000000;
2011
    }
2012
}
2013

    
2014
void r4k_helper_tlbr (void)
2015
{
2016
    r4k_tlb_t *tlb;
2017
    uint8_t ASID;
2018
    int idx;
2019

    
2020
    ASID = env->CP0_EntryHi & 0xFF;
2021
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
2022
    tlb = &env->tlb->mmu.r4k.tlb[idx];
2023

    
2024
    /* If this will change the current ASID, flush qemu's TLB.  */
2025
    if (ASID != tlb->ASID)
2026
        cpu_mips_tlb_flush (env, 1);
2027

    
2028
    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
2029

    
2030
    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
2031
    env->CP0_PageMask = tlb->PageMask;
2032
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
2033
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
2034
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
2035
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
2036
}
2037

    
2038
void helper_tlbwi(void)
2039
{
2040
    env->tlb->helper_tlbwi();
2041
}
2042

    
2043
void helper_tlbwr(void)
2044
{
2045
    env->tlb->helper_tlbwr();
2046
}
2047

    
2048
void helper_tlbp(void)
2049
{
2050
    env->tlb->helper_tlbp();
2051
}
2052

    
2053
void helper_tlbr(void)
2054
{
2055
    env->tlb->helper_tlbr();
2056
}
2057

    
2058
/* Specials */
2059
target_ulong helper_di (void)
2060
{
2061
    target_ulong t0 = env->CP0_Status;
2062

    
2063
    env->CP0_Status = t0 & ~(1 << CP0St_IE);
2064
    return t0;
2065
}
2066

    
2067
target_ulong helper_ei (void)
2068
{
2069
    target_ulong t0 = env->CP0_Status;
2070

    
2071
    env->CP0_Status = t0 | (1 << CP0St_IE);
2072
    return t0;
2073
}
2074

    
2075
static void debug_pre_eret (void)
2076
{
2077
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2078
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2079
                env->active_tc.PC, env->CP0_EPC);
2080
        if (env->CP0_Status & (1 << CP0St_ERL))
2081
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2082
        if (env->hflags & MIPS_HFLAG_DM)
2083
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2084
        qemu_log("\n");
2085
    }
2086
}
2087

    
2088
static void debug_post_eret (void)
2089
{
2090
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2091
        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2092
                env->active_tc.PC, env->CP0_EPC);
2093
        if (env->CP0_Status & (1 << CP0St_ERL))
2094
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2095
        if (env->hflags & MIPS_HFLAG_DM)
2096
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2097
        switch (env->hflags & MIPS_HFLAG_KSU) {
2098
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
2099
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
2100
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
2101
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
2102
        }
2103
    }
2104
}
2105

    
2106
/* Load a new PC; bit 0 of the target selects the MIPS16/microMIPS
   instruction-set mode (ISA mode bit) and is stripped from the PC. */
static void set_pc (target_ulong error_pc)
{
    env->active_tc.PC = error_pc & ~(target_ulong)1;
    if (error_pc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    } else {
        env->hflags &= ~(MIPS_HFLAG_M16);
    }
}
2115

    
2116
void helper_eret (void)
2117
{
2118
    debug_pre_eret();
2119
    if (env->CP0_Status & (1 << CP0St_ERL)) {
2120
        set_pc(env->CP0_ErrorEPC);
2121
        env->CP0_Status &= ~(1 << CP0St_ERL);
2122
    } else {
2123
        set_pc(env->CP0_EPC);
2124
        env->CP0_Status &= ~(1 << CP0St_EXL);
2125
    }
2126
    compute_hflags(env);
2127
    debug_post_eret();
2128
    env->lladdr = 1;
2129
}
2130

    
2131
void helper_deret (void)
2132
{
2133
    debug_pre_eret();
2134
    set_pc(env->CP0_DEPC);
2135

    
2136
    env->hflags &= MIPS_HFLAG_DM;
2137
    compute_hflags(env);
2138
    debug_post_eret();
2139
    env->lladdr = 1;
2140
}
2141
#endif /* !CONFIG_USER_ONLY */
2142

    
2143
target_ulong helper_rdhwr_cpunum(void)
2144
{
2145
    if ((env->hflags & MIPS_HFLAG_CP0) ||
2146
        (env->CP0_HWREna & (1 << 0)))
2147
        return env->CP0_EBase & 0x3ff;
2148
    else
2149
        helper_raise_exception(EXCP_RI);
2150

    
2151
    return 0;
2152
}
2153

    
2154
target_ulong helper_rdhwr_synci_step(void)
2155
{
2156
    if ((env->hflags & MIPS_HFLAG_CP0) ||
2157
        (env->CP0_HWREna & (1 << 1)))
2158
        return env->SYNCI_Step;
2159
    else
2160
        helper_raise_exception(EXCP_RI);
2161

    
2162
    return 0;
2163
}
2164

    
2165
target_ulong helper_rdhwr_cc(void)
2166
{
2167
    if ((env->hflags & MIPS_HFLAG_CP0) ||
2168
        (env->CP0_HWREna & (1 << 2)))
2169
        return env->CP0_Count;
2170
    else
2171
        helper_raise_exception(EXCP_RI);
2172

    
2173
    return 0;
2174
}
2175

    
2176
target_ulong helper_rdhwr_ccres(void)
2177
{
2178
    if ((env->hflags & MIPS_HFLAG_CP0) ||
2179
        (env->CP0_HWREna & (1 << 3)))
2180
        return env->CCRes;
2181
    else
2182
        helper_raise_exception(EXCP_RI);
2183

    
2184
    return 0;
2185
}
2186

    
2187
void helper_pmon (int function)
2188
{
2189
    function /= 2;
2190
    switch (function) {
2191
    case 2: /* TODO: char inbyte(int waitflag); */
2192
        if (env->active_tc.gpr[4] == 0)
2193
            env->active_tc.gpr[2] = -1;
2194
        /* Fall through */
2195
    case 11: /* TODO: char inbyte (void); */
2196
        env->active_tc.gpr[2] = -1;
2197
        break;
2198
    case 3:
2199
    case 12:
2200
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
2201
        break;
2202
    case 17:
2203
        break;
2204
    case 158:
2205
        {
2206
            unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
2207
            printf("%s", fmt);
2208
        }
2209
        break;
2210
    }
2211
}
2212

    
2213
void helper_wait (void)
2214
{
2215
    env->halted = 1;
2216
    cpu_reset_interrupt(env, CPU_INTERRUPT_WAKE);
2217
    helper_raise_exception(EXCP_HLT);
2218
}
2219

    
2220
#if !defined(CONFIG_USER_ONLY)
2221

    
2222
static void QEMU_NORETURN do_unaligned_access(target_ulong addr, int is_write,
2223
                                              int is_user, uintptr_t retaddr);
2224

    
2225
#define MMUSUFFIX _mmu
2226
#define ALIGNED_ONLY
2227

    
2228
#define SHIFT 0
2229
#include "softmmu_template.h"
2230

    
2231
#define SHIFT 1
2232
#include "softmmu_template.h"
2233

    
2234
#define SHIFT 2
2235
#include "softmmu_template.h"
2236

    
2237
#define SHIFT 3
2238
#include "softmmu_template.h"
2239

    
2240
static void do_unaligned_access(target_ulong addr, int is_write,
2241
                                int is_user, uintptr_t retaddr)
2242
{
2243
    env->CP0_BadVAddr = addr;
2244
    do_restore_state (retaddr);
2245
    helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
2246
}
2247

    
2248
void tlb_fill(CPUMIPSState *env1, target_ulong addr, int is_write, int mmu_idx,
2249
              uintptr_t retaddr)
2250
{
2251
    TranslationBlock *tb;
2252
    CPUMIPSState *saved_env;
2253
    int ret;
2254

    
2255
    saved_env = env;
2256
    env = env1;
2257
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
2258
    if (ret) {
2259
        if (retaddr) {
2260
            /* now we have a real cpu fault */
2261
            tb = tb_find_pc(retaddr);
2262
            if (tb) {
2263
                /* the PC is inside the translated code. It means that we have
2264
                   a virtual CPU fault */
2265
                cpu_restore_state(tb, env, retaddr);
2266
            }
2267
        }
2268
        helper_raise_exception_err(env->exception_index, env->error_code);
2269
    }
2270
    env = saved_env;
2271
}
2272

    
2273
/* Access to an unassigned physical address: raise a bus error
   (instruction or data, depending on the access type). */
void cpu_unassigned_access(CPUMIPSState *env1, target_phys_addr_t addr,
                           int is_write, int is_exec, int unused, int size)
{
    env = env1;

    if (is_exec) {
        helper_raise_exception(EXCP_IBE);
    } else {
        helper_raise_exception(EXCP_DBE);
    }
}
2283
#endif /* !CONFIG_USER_ONLY */
2284

    
2285
/* Complex FPU operations which may need stack space. */
2286

    
2287
#define FLOAT_ONE32 make_float32(0x3f8 << 20)
2288
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2289
#define FLOAT_TWO32 make_float32(1 << 30)
2290
#define FLOAT_TWO64 make_float64(1ULL << 62)
2291
#define FLOAT_QNAN32 0x7fbfffff
2292
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2293
#define FLOAT_SNAN32 0x7fffffff
2294
#define FLOAT_SNAN64 0x7fffffffffffffffULL
2295

    
2296
/* convert MIPS rounding mode in FCR31 to IEEE library */
2297
static unsigned int ieee_rm[] = {
2298
    float_round_nearest_even,
2299
    float_round_to_zero,
2300
    float_round_up,
2301
    float_round_down
2302
};
2303

    
2304
#define RESTORE_ROUNDING_MODE \
2305
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2306

    
2307
#define RESTORE_FLUSH_MODE \
2308
    set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
2309

    
2310
/* CFC1: read an FPU control register.  Registers 25/26/28 are
   alternate views assembled from fields of FCR31; anything else reads
   FCR31 itself, and register 0 reads FCR0. */
target_ulong helper_cfc1 (uint32_t reg)
{
    target_ulong val;

    switch (reg) {
    case 0:
        val = (int32_t)env->active_fpu.fcr0;
        break;
    case 25:
        /* Condition codes 7..1 and 0 gathered into one field. */
        val = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
        break;
    case 26:
        /* Cause and flag bits. */
        val = env->active_fpu.fcr31 & 0x0003f07c;
        break;
    case 28:
        /* Enables, rounding mode, plus the FS bit moved down to bit 2. */
        val = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
        break;
    default:
        val = (int32_t)env->active_fpu.fcr31;
        break;
    }

    return val;
}
2334

    
2335
/* CTC1: write an FPU control register.  Writes with reserved bits set
   are silently ignored.  After updating FCR31, re-derive the softfloat
   rounding/flush modes and raise FPE if an enabled cause bit is set. */
void helper_ctc1 (target_ulong arg1, uint32_t reg)
{
    switch(reg) {
    case 25:
        /* Scatter the packed condition codes back into FCR31. */
        if (arg1 & 0xffffff00) {
            return;
        }
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
                     ((arg1 & 0x1) << 23);
        break;
    case 26:
        /* Cause and flag bits only. */
        if (arg1 & 0x007c0000) {
            return;
        }
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
        break;
    case 28:
        /* Enables, rounding mode; bit 2 moves back up to the FS bit. */
        if (arg1 & 0x007c0000) {
            return;
        }
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
                     ((arg1 & 0x4) << 22);
        break;
    case 31:
        if (arg1 & 0x007c0000) {
            return;
        }
        env->active_fpu.fcr31 = arg1;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    /* set flush-to-zero mode */
    RESTORE_FLUSH_MODE;
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    /* 0x20 keeps the unimplemented-operation cause unmaskable. */
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31)) {
        helper_raise_exception(EXCP_FPE);
    }
}
2371

    
2372
static inline int ieee_ex_to_mips(int xcpt)
2373
{
2374
    int ret = 0;
2375
    if (xcpt) {
2376
        if (xcpt & float_flag_invalid) {
2377
            ret |= FP_INVALID;
2378
        }
2379
        if (xcpt & float_flag_overflow) {
2380
            ret |= FP_OVERFLOW;
2381
        }
2382
        if (xcpt & float_flag_underflow) {
2383
            ret |= FP_UNDERFLOW;
2384
        }
2385
        if (xcpt & float_flag_divbyzero) {
2386
            ret |= FP_DIV0;
2387
        }
2388
        if (xcpt & float_flag_inexact) {
2389
            ret |= FP_INEXACT;
2390
        }
2391
    }
2392
    return ret;
2393
}
2394

    
2395
static inline void update_fcr31(void)
2396
{
2397
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));
2398

    
2399
    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
2400
    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
2401
        helper_raise_exception(EXCP_FPE);
2402
    else
2403
        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
2404
}
2405

    
2406
/* Float support.
2407
   Single precition routines have a "s" suffix, double precision a
2408
   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2409
   paired single lower "pl", paired single upper "pu".  */
2410

    
2411
/* unary operations, modifying fp status  */
2412
/* SQRT.D: IEEE square root, double precision. */
uint64_t helper_float_sqrt_d(uint64_t fdt0)
{
    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
}
2416

    
2417
/* SQRT.S: IEEE square root, single precision. */
uint32_t helper_float_sqrt_s(uint32_t fst0)
{
    return float32_sqrt(fst0, &env->active_fpu.fp_status);
}
2421

    
2422
/* CVT.D.S: widen single to double precision. */
uint64_t helper_float_cvtd_s(uint32_t fst0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float32_to_float64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return result;
}
2431

    
2432
/* CVT.D.W: convert a 32-bit integer to double precision. */
uint64_t helper_float_cvtd_w(uint32_t wt0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = int32_to_float64(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return result;
}
2441

    
2442
/* CVT.D.L: convert a 64-bit integer to double precision. */
uint64_t helper_float_cvtd_l(uint64_t dt0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = int64_to_float64(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return result;
}
2451

    
2452
/* CVT.L.D: convert double to 64-bit integer; on overflow or invalid
   operation the result is the architected NaN bit pattern. */
uint64_t helper_float_cvtl_d(uint64_t fdt0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN64;
    }
    return result;
}
2463

    
2464
/* CVT.L.S: convert single to 64-bit integer; on overflow or invalid
   operation the result is the architected NaN bit pattern. */
uint64_t helper_float_cvtl_s(uint32_t fst0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float32_to_int64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN64;
    }
    return result;
}
2475

    
2476
/* CVT.PS.PW: convert a pair of packed 32-bit integers to paired single. */
uint64_t helper_float_cvtps_pw(uint64_t dt0)
{
    uint32_t lo;
    uint32_t hi;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    lo = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    hi = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)hi << 32) | lo;
}
2487

    
2488
/* CVT.PW.PS: convert paired single to a pair of packed 32-bit
   integers; both halves fall back to the NaN pattern on
   overflow/invalid. */
uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
{
    uint32_t lo;
    uint32_t hi;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    lo = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    hi = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        lo = FLOAT_SNAN32;
        hi = FLOAT_SNAN32;
    }
    return ((uint64_t)hi << 32) | lo;
}
2503

    
2504
/* CVT.S.D: narrow double to single precision. */
uint32_t helper_float_cvts_d(uint64_t fdt0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float64_to_float32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return result;
}
2513

    
2514
/* CVT.S.W: convert a 32-bit integer to single precision. */
uint32_t helper_float_cvts_w(uint32_t wt0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = int32_to_float32(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return result;
}
2523

    
2524
/* CVT.S.L: convert a 64-bit integer to single precision. */
uint32_t helper_float_cvts_l(uint64_t dt0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = int64_to_float32(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return result;
}
2533

    
2534
/* CVT.S.PL: take the lower single of a paired-single operand.  The
   value is copied unchanged, but FCSR is still updated. */
uint32_t helper_float_cvts_pl(uint32_t wt0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = wt0;
    update_fcr31();
    return result;
}
2543

    
2544
/* CVT.S.PU: take the upper single of a paired-single operand.  The
   value is copied unchanged, but FCSR is still updated. */
uint32_t helper_float_cvts_pu(uint32_t wth0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = wth0;
    update_fcr31();
    return result;
}
2553

    
2554
/* CVT.W.S: convert single to 32-bit integer; NaN pattern on
   overflow/invalid. */
uint32_t helper_float_cvtw_s(uint32_t fst0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float32_to_int32(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN32;
    }
    return result;
}
2565

    
2566
/* CVT.W.D: convert double to 32-bit integer; NaN pattern on
   overflow/invalid. */
uint32_t helper_float_cvtw_d(uint64_t fdt0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN32;
    }
    return result;
}
2577

    
2578
/* ROUND.L.D: double -> 64-bit integer, round to nearest even,
   restoring the program's rounding mode afterwards. */
uint64_t helper_float_roundl_d(uint64_t fdt0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    result = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN64;
    }
    return result;
}
2591

    
2592
/* ROUND.L.S: single -> 64-bit integer, round to nearest even,
   restoring the program's rounding mode afterwards. */
uint64_t helper_float_roundl_s(uint32_t fst0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    result = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN64;
    }
    return result;
}
2605

    
2606
/* ROUND.W.D: double -> 32-bit integer, round to nearest even,
   restoring the program's rounding mode afterwards. */
uint32_t helper_float_roundw_d(uint64_t fdt0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    result = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN32;
    }
    return result;
}
2619

    
2620
/* ROUND.W.S: single -> 32-bit integer, round to nearest even,
   restoring the program's rounding mode afterwards. */
uint32_t helper_float_roundw_s(uint32_t fst0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    result = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN32;
    }
    return result;
}
2633

    
2634
/* TRUNC.L.D: double -> 64-bit integer, rounding toward zero. */
uint64_t helper_float_truncl_d(uint64_t fdt0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN64;
    }
    return result;
}
2645

    
2646
/* TRUNC.L.S: single -> 64-bit integer, rounding toward zero. */
uint64_t helper_float_truncl_s(uint32_t fst0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN64;
    }
    return result;
}
2657

    
2658
/* TRUNC.W.D: double -> 32-bit integer, rounding toward zero. */
uint32_t helper_float_truncw_d(uint64_t fdt0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN32;
    }
    return result;
}
2669

    
2670
/* TRUNC.W.S: single -> 32-bit integer, rounding toward zero. */
uint32_t helper_float_truncw_s(uint32_t fst0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN32;
    }
    return result;
}
2681

    
2682
/* CEIL.L.D: double -> 64-bit integer, rounding up, restoring the
   program's rounding mode afterwards. */
uint64_t helper_float_ceill_d(uint64_t fdt0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    result = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN64;
    }
    return result;
}
2695

    
2696
/* CEIL.L.S: single -> 64-bit integer, rounding up, restoring the
   program's rounding mode afterwards. */
uint64_t helper_float_ceill_s(uint32_t fst0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    result = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN64;
    }
    return result;
}
2709

    
2710
/* CEIL.W.D: double -> 32-bit integer, rounding up, restoring the
   program's rounding mode afterwards. */
uint32_t helper_float_ceilw_d(uint64_t fdt0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    result = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN32;
    }
    return result;
}
2723

    
2724
/* CEIL.W.S: single -> 32-bit integer, rounding up, restoring the
   program's rounding mode afterwards. */
uint32_t helper_float_ceilw_s(uint32_t fst0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    result = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN32;
    }
    return result;
}
2737

    
2738
/* FLOOR.L.D: double -> 64-bit integer, rounding down, restoring the
   program's rounding mode afterwards. */
uint64_t helper_float_floorl_d(uint64_t fdt0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    result = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN64;
    }
    return result;
}
2751

    
2752
/* FLOOR.L.S: single -> 64-bit integer, rounding down, restoring the
   program's rounding mode afterwards. */
uint64_t helper_float_floorl_s(uint32_t fst0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    result = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN64;
    }
    return result;
}
2765

    
2766
/* FLOOR.W.D: double -> 32-bit integer, rounding down, restoring the
   program's rounding mode afterwards. */
uint32_t helper_float_floorw_d(uint64_t fdt0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    result = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN32;
    }
    return result;
}
2779

    
2780
/* FLOOR.W.S: single -> 32-bit integer, rounding down, restoring the
   program's rounding mode afterwards. */
uint32_t helper_float_floorw_s(uint32_t fst0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    result = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        result = FLOAT_SNAN32;
    }
    return result;
}
2793

    
2794
/* unary operations, not modifying fp status  */
2795
#define FLOAT_UNOP(name)                                       \
2796
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)                \
2797
{                                                              \
2798
    return float64_ ## name(fdt0);                             \
2799
}                                                              \
2800
uint32_t helper_float_ ## name ## _s(uint32_t fst0)                \
2801
{                                                              \
2802
    return float32_ ## name(fst0);                             \
2803
}                                                              \
2804
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)               \
2805
{                                                              \
2806
    uint32_t wt0;                                              \
2807
    uint32_t wth0;                                             \
2808
                                                               \
2809
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                 \
2810
    wth0 = float32_ ## name(fdt0 >> 32);                       \
2811
    return ((uint64_t)wth0 << 32) | wt0;                       \
2812
}
2813
FLOAT_UNOP(abs)
2814
FLOAT_UNOP(chs)
2815
#undef FLOAT_UNOP
2816

    
2817
/* MIPS specific unary operations */
2818
/* RECIP.D: reciprocal, computed as 1.0 / x in double precision. */
uint64_t helper_float_recip_d(uint64_t fdt0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return result;
}
2827

    
2828
/* RECIP.S: reciprocal, computed as 1.0 / x in single precision. */
uint32_t helper_float_recip_s(uint32_t fst0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return result;
}
2837

    
2838
/* RSQRT.D: reciprocal square root, computed as 1.0 / sqrt(x) in
   double precision. */
uint64_t helper_float_rsqrt_d(uint64_t fdt0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    result = float64_div(FLOAT_ONE64, result, &env->active_fpu.fp_status);
    update_fcr31();
    return result;
}
2848

    
2849
/* RSQRT.S: reciprocal square root, computed as 1.0 / sqrt(x) in
   single precision. */
uint32_t helper_float_rsqrt_s(uint32_t fst0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float32_sqrt(fst0, &env->active_fpu.fp_status);
    result = float32_div(FLOAT_ONE32, result, &env->active_fpu.fp_status);
    update_fcr31();
    return result;
}
2859

    
2860
/* RECIP1.D: reduced-precision reciprocal step, emulated here as a
   full-precision 1.0 / x in double precision. */
uint64_t helper_float_recip1_d(uint64_t fdt0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return result;
}
2869

    
2870
/* RECIP1.S: reduced-precision reciprocal step, emulated here as a
   full-precision 1.0 / x in single precision. */
uint32_t helper_float_recip1_s(uint32_t fst0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return result;
}
2879

    
2880
/* RECIP1.PS: reciprocal of each half of a paired-single operand. */
uint64_t helper_float_recip1_ps(uint64_t fdt0)
{
    uint32_t lo;
    uint32_t hi;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    lo = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    hi = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)hi << 32) | lo;
}
2891

    
2892
/* RSQRT1.D: reduced-precision reciprocal-sqrt step, emulated here as
   a full-precision 1.0 / sqrt(x) in double precision. */
uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
{
    uint64_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    result = float64_div(FLOAT_ONE64, result, &env->active_fpu.fp_status);
    update_fcr31();
    return result;
}
2902

    
2903
/* RSQRT1.S: reduced-precision reciprocal-sqrt step, emulated here as
   a full-precision 1.0 / sqrt(x) in single precision. */
uint32_t helper_float_rsqrt1_s(uint32_t fst0)
{
    uint32_t result;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    result = float32_sqrt(fst0, &env->active_fpu.fp_status);
    result = float32_div(FLOAT_ONE32, result, &env->active_fpu.fp_status);
    update_fcr31();
    return result;
}
2913

    
2914
/* RSQRT1.PS: reciprocal square root of each half of a paired-single
   operand. */
uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
{
    uint32_t lo;
    uint32_t hi;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    lo = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    hi = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
    lo = float32_div(FLOAT_ONE32, lo, &env->active_fpu.fp_status);
    hi = float32_div(FLOAT_ONE32, hi, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)hi << 32) | lo;
}
2927

    
2928
#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
2929

    
2930
/* binary operations */
/*
 * FLOAT_BINOP(name) expands to three helpers implementing the FPU
 * arithmetic instruction <name> (add/sub/mul/div below) for double
 * (_d), single (_s) and paired-single (_ps) formats.  Each helper:
 *   1. clears the softfloat exception flags,
 *   2. performs the operation through env->active_fpu.fp_status,
 *   3. latches the resulting flags into FCR31 via update_fcr31(),
 *   4. substitutes the default quiet NaN if the operation raised the
 *      invalid-operation exception (FP_INVALID set in the FCR31 cause
 *      field).
 * The _ps variant applies the operation independently to the low and
 * high 32-bit singles packed in the 64-bit operands.
 */
#define FLOAT_BINOP(name)                                          \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)     \
{                                                                  \
    uint64_t dt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
        dt2 = FLOAT_QNAN64;                                        \
    return dt2;                                                    \
}                                                                  \
                                                                   \
uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)     \
{                                                                  \
    uint32_t wt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
        wt2 = FLOAT_QNAN32;                                        \
    return wt2;                                                    \
}                                                                  \
                                                                   \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)    \
{                                                                  \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
    uint32_t fsth0 = fdt0 >> 32;                                   \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
    uint32_t fsth1 = fdt1 >> 32;                                   \
    uint32_t wt2;                                                  \
    uint32_t wth2;                                                 \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);  \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {              \
        wt2 = FLOAT_QNAN32;                                        \
        wth2 = FLOAT_QNAN32;                                       \
    }                                                              \
    return ((uint64_t)wth2 << 32) | wt2;                           \
}

/* Instantiate helper_float_{add,sub,mul,div}_{d,s,ps}. */
FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP

/* ternary operations */
/*
 * FLOAT_TERNOP(name1, name2) expands to the unfused multiply-add style
 * helpers helper_float_<name1><name2>_{d,s,ps}: the result is
 * name2(name1(op0, op1), op2), i.e. MADD is add(mul(a, b), c) with an
 * intermediate rounding step between the two operations.
 * NOTE(review): unlike FLOAT_BINOP, these helpers neither reset the
 * softfloat exception flags nor call update_fcr31() -- presumably
 * intentional for this code base, but worth confirming against the
 * FCR31 update paths.
 */
#define FLOAT_TERNOP(name1, name2)                                        \
uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
}                                                                         \
                                                                          \
uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
}                                                                         \
                                                                          \
uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
                                            uint64_t fdt2)                \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}

/* Instantiate helper_float_{muladd,mulsub}_{d,s,ps}. */
FLOAT_TERNOP(mul, add)
FLOAT_TERNOP(mul, sub)
#undef FLOAT_TERNOP

/* negated ternary operations */
/*
 * FLOAT_NTERNOP(name1, name2) expands to the negated multiply-add
 * style helpers helper_float_n<name1><name2>_{d,s,ps}: the result is
 * -(name2(name1(op0, op1), op2)), i.e. NMADD/NMSUB with an
 * intermediate rounding step and a final sign flip via
 * float{32,64}_chs().
 * NOTE(review): like FLOAT_TERNOP, these helpers do not touch the
 * softfloat exception flags or FCR31 -- confirm this is intended.
 */
#define FLOAT_NTERNOP(name1, name2)                                       \
uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
    return float64_chs(fdt2);                                             \
}                                                                         \
                                                                          \
uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    return float32_chs(fst2);                                             \
}                                                                         \
                                                                          \
uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
                                           uint64_t fdt2)                 \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
    fst2 = float32_chs(fst2);                                             \
    fsth2 = float32_chs(fsth2);                                           \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}

/* Instantiate helper_float_{nmuladd,nmulsub}_{d,s,ps}. */
FLOAT_NTERNOP(mul, add)
FLOAT_NTERNOP(mul, sub)
#undef FLOAT_NTERNOP

/* MIPS specific binary operations */

/* RECIP2.D: reciprocal iteration step, -(fdt0 * fdt2 - 1.0) in double
 * precision.  Flags are cleared first and latched into FCR31 after. */
uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
3071
{
3072
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3073
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3074
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
3075
    update_fcr31();
3076
    return fst2;
3077
}
3078

    
3079
uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
3080
{
3081
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3082
    uint32_t fsth0 = fdt0 >> 32;
3083
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3084
    uint32_t fsth2 = fdt2 >> 32;
3085

    
3086
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3087
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3088
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3089
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
3090
    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
3091
    update_fcr31();
3092
    return ((uint64_t)fsth2 << 32) | fst2;
3093
}
3094

    
3095
uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
3096
{
3097
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3098
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3099
    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
3100
    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
3101
    update_fcr31();
3102
    return fdt2;
3103
}
3104

    
3105
uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
3106
{
3107
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3108
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3109
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
3110
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3111
    update_fcr31();
3112
    return fst2;
3113
}
3114

    
3115
uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
3116
{
3117
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3118
    uint32_t fsth0 = fdt0 >> 32;
3119
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3120
    uint32_t fsth2 = fdt2 >> 32;
3121

    
3122
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3123
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3124
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3125
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
3126
    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
3127
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3128
    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
3129
    update_fcr31();
3130
    return ((uint64_t)fsth2 << 32) | fst2;
3131
}
3132

    
3133
uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
3134
{
3135
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3136
    uint32_t fsth0 = fdt0 >> 32;
3137
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3138
    uint32_t fsth1 = fdt1 >> 32;
3139
    uint32_t fst2;
3140
    uint32_t fsth2;
3141

    
3142
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3143
    fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
3144
    fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
3145
    update_fcr31();
3146
    return ((uint64_t)fsth2 << 32) | fst2;
3147
}
3148

    
3149
uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
3150
{
3151
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3152
    uint32_t fsth0 = fdt0 >> 32;
3153
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3154
    uint32_t fsth1 = fdt1 >> 32;
3155
    uint32_t fst2;
3156
    uint32_t fsth2;
3157

    
3158
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3159
    fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
3160
    fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
3161
    update_fcr31();
3162
    return ((uint64_t)fsth2 << 32) | fst2;
3163
}
3164

    
3165
/* compare operations */
/*
 * FOP_COND_D(op, cond) expands to the double-precision compare helpers
 * for condition <op>: helper_cmp_d_<op> compares the operands directly,
 * helper_cmpabs_d_<op> compares their absolute values (CABS).  Each
 * clears the softfloat exception flags, evaluates <cond>, latches the
 * flags into FCR31 via update_fcr31(), and sets or clears FP condition
 * code <cc> according to the result.
 */
#define FOP_COND_D(op, cond)                                   \
void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                              \
    int c;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                              \
    int c;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);  \
    fdt0 = float64_abs(fdt0);                                  \
    fdt1 = float64_abs(fdt1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

/* Instantiate the 16 double-precision compare conditions.  The first
 * eight use the quiet (non-signaling-on-QNaN) predicates, the last
 * eight the signaling ones, per the MIPS C.cond.D encoding. */
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float64_unordered_quiet() is still called. */
FOP_COND_D(f,   (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(un,  float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(eq,  float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float64_unordered() is still called. */
FOP_COND_D(sf,  (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(lt,  float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(le,  float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))

/*
 * FOP_COND_S(op, cond) expands to the single-precision compare helpers
 * for condition <op>: helper_cmp_s_<op> compares the operands directly,
 * helper_cmpabs_s_<op> compares their absolute values (CABS).  Each
 * clears the softfloat exception flags, evaluates <cond>, latches the
 * flags into FCR31 via update_fcr31(), and sets or clears FP condition
 * code <cc> according to the result.
 */
#define FOP_COND_S(op, cond)                                   \
void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
{                                                              \
    int c;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
{                                                              \
    int c;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);  \
    fst0 = float32_abs(fst0);                                  \
    fst1 = float32_abs(fst1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

/* Instantiate the 16 single-precision compare conditions (quiet
 * predicates first, signaling predicates second). */
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered_quiet() is still called. */
FOP_COND_S(f,   (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(un,  float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(eq,  float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)  || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)  || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)  || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered() is still called. */
FOP_COND_S(sf,  (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(lt,  float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(le,  float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))

/*
 * FOP_COND_PS(op, condl, condh) expands to the paired-single compare
 * helpers for condition <op>: helper_cmp_ps_<op> compares the operands
 * directly, helper_cmpabs_ps_<op> compares their absolute values
 * (CABS.PS).  condl/condh evaluate the condition on the low and high
 * singles; the two results are latched into consecutive FP condition
 * codes cc and cc+1, and the softfloat exception flags raised by the
 * predicates are folded into FCR31 via update_fcr31().
 *
 * Fix: the cmpabs variant previously omitted the
 * set_float_exception_flags() reset performed by every other compare
 * helper (FOP_COND_D, FOP_COND_S, and cmp_ps above), so exception
 * flags left over from a previous FP operation could leak into FCR31
 * through update_fcr31().
 */
#define FOP_COND_PS(op, condl, condh)                           \
void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                               \
    uint32_t fst0, fsth0, fst1, fsth1;                          \
    int ch, cl;                                                 \
    set_float_exception_flags(0, &env->active_fpu.fp_status);   \
    fst0 = fdt0 & 0XFFFFFFFF;                                   \
    fsth0 = fdt0 >> 32;                                         \
    fst1 = fdt1 & 0XFFFFFFFF;                                   \
    fsth1 = fdt1 >> 32;                                         \
    cl = condl;                                                 \
    ch = condh;                                                 \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}                                                               \
void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                               \
    uint32_t fst0, fsth0, fst1, fsth1;                          \
    int ch, cl;                                                 \
    set_float_exception_flags(0, &env->active_fpu.fp_status);   \
    fst0 = float32_abs(fdt0 & 0XFFFFFFFF);                      \
    fsth0 = float32_abs(fdt0 >> 32);                            \
    fst1 = float32_abs(fdt1 & 0XFFFFFFFF);                      \
    fsth1 = float32_abs(fdt1 >> 32);                            \
    cl = condl;                                                 \
    ch = condh;                                                 \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}

/* Instantiate the 16 paired-single compare conditions; each takes one
 * predicate per half (quiet variants first, signaling variants
 * second). */
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered_quiet() is still called. */
FOP_COND_PS(f,   (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(un,  float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(eq,  float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)    || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)    || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)    || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered() is still called. */
FOP_COND_PS(sf,  (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(lt,  float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(le,  float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))