Statistics
| Branch: | Revision:

root / target-mips / op_helper.c @ 93148aa5

History | View | Annotate | Download (105.4 kB)

1
/*
2
 *  MIPS emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2004-2005 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <stdlib.h>
20
#include "cpu.h"
21
#include "dyngen-exec.h"
22

    
23
#include "host-utils.h"
24

    
25
#include "helper.h"
26

    
27
#if !defined(CONFIG_USER_ONLY)
28
#include "softmmu_exec.h"
29
#endif /* !defined(CONFIG_USER_ONLY) */
30

    
31
#ifndef CONFIG_USER_ONLY
32
static inline void cpu_mips_tlb_flush (CPUState *env, int flush_global);
33
#endif
34

    
35
/* Recompute the cached env->hflags from CP0_Status, FCR0 and the ISA
   flags.  Translation and memory-access modes key off hflags, so this
   must run after any change to the fields read below.  */
static inline void compute_hflags(CPUState *env)
{
    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                     MIPS_HFLAG_UX);
    /* Outside exception level (EXL), error level (ERL) and debug mode,
       the effective privilege mode comes from Status.KSU.  */
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
    }
#if defined(TARGET_MIPS64)
    /* 64-bit operations are available outside user mode, or in user
       mode when Status.PX or Status.UX is set.  */
    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
        (env->CP0_Status & (1 << CP0St_PX)) ||
        (env->CP0_Status & (1 << CP0St_UX))) {
        env->hflags |= MIPS_HFLAG_64;
    }
    /* Status.UX additionally enables the 64-bit user address space.  */
    if (env->CP0_Status & (1 << CP0St_UX)) {
        env->hflags |= MIPS_HFLAG_UX;
    }
#endif
    /* CP0 is accessible with Status.CU0 set, or in any non-user mode.  */
    if ((env->CP0_Status & (1 << CP0St_CU0)) ||
        !(env->hflags & MIPS_HFLAG_KSU)) {
        env->hflags |= MIPS_HFLAG_CP0;
    }
    if (env->CP0_Status & (1 << CP0St_CU1)) {
        env->hflags |= MIPS_HFLAG_FPU;
    }
    if (env->CP0_Status & (1 << CP0St_FR)) {
        env->hflags |= MIPS_HFLAG_F64;
    }
    /* COP1X (indexed/fused FP ops) availability depends on the ISA level.  */
    if (env->insn_flags & ISA_MIPS32R2) {
        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS32) {
        if (env->hflags & MIPS_HFLAG_64) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS4) {
        /* All supported MIPS IV CPUs use the XX (CU3) to enable
           and disable the MIPS IV extensions to the MIPS III ISA.
           Some other MIPS IV CPUs ignore the bit, so the check here
           would be too restrictive for them.  */
        if (env->CP0_Status & (1 << CP0St_CU3)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    }
}
83

    
84
/*****************************************************************************/
85
/* Exceptions processing helpers */
86

    
87
void helper_raise_exception_err (uint32_t exception, int error_code)
88
{
89
#if 1
90
    if (exception < 0x100)
91
        qemu_log("%s: %d %d\n", __func__, exception, error_code);
92
#endif
93
    env->exception_index = exception;
94
    env->error_code = error_code;
95
    cpu_loop_exit(env);
96
}
97

    
98
/* Raise an exception with a zero error code; does not return.  */
void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
102

    
103
#if !defined(CONFIG_USER_ONLY)
104
/* Restore the guest CPU state for the translation block containing the
   given host PC, so a fault raised from generated code reports the
   correct guest context.  No-op if the PC is not inside a known TB.  */
static void do_restore_state (void *pc_ptr)
{
    TranslationBlock *tb;
    unsigned long pc = (unsigned long) pc_ptr;

    tb = tb_find_pc (pc);
    if (tb) {
        cpu_restore_state(tb, env, pc);
    }
}
114
#endif
115

    
116
#if defined(CONFIG_USER_ONLY)
117
#define HELPER_LD(name, insn, type)                                     \
118
static inline type do_##name(target_ulong addr, int mem_idx)            \
119
{                                                                       \
120
    return (type) insn##_raw(addr);                                     \
121
}
122
#else
123
#define HELPER_LD(name, insn, type)                                     \
124
static inline type do_##name(target_ulong addr, int mem_idx)            \
125
{                                                                       \
126
    switch (mem_idx)                                                    \
127
    {                                                                   \
128
    case 0: return (type) insn##_kernel(addr); break;                   \
129
    case 1: return (type) insn##_super(addr); break;                    \
130
    default:                                                            \
131
    case 2: return (type) insn##_user(addr); break;                     \
132
    }                                                                   \
133
}
134
#endif
135
HELPER_LD(lbu, ldub, uint8_t)
136
HELPER_LD(lw, ldl, int32_t)
137
#ifdef TARGET_MIPS64
138
HELPER_LD(ld, ldq, int64_t)
139
#endif
140
#undef HELPER_LD
141

    
142
/* Generate do_<name>() store accessors; same mem_idx dispatch scheme as
   HELPER_LD above (0 = kernel, 1 = supervisor, 2/default = user).  */
#if defined(CONFIG_USER_ONLY)
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
{                                                                       \
    insn##_raw(addr, val);                                              \
}
#else
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: insn##_kernel(addr, val); break;                            \
    case 1: insn##_super(addr, val); break;                             \
    default:                                                            \
    case 2: insn##_user(addr, val); break;                              \
    }                                                                   \
}
#endif
HELPER_ST(sb, stb, uint8_t)
HELPER_ST(sw, stl, uint32_t)
#ifdef TARGET_MIPS64
HELPER_ST(sd, stq, uint64_t)
#endif
#undef HELPER_ST
167

    
168
/* CLO: count leading ones in the low 32 bits of arg1.  */
target_ulong helper_clo (target_ulong arg1)
{
    return clo32(arg1);
}
172

    
173
/* CLZ: count leading zeroes in the low 32 bits of arg1.  */
target_ulong helper_clz (target_ulong arg1)
{
    return clz32(arg1);
}
177

    
178
#if defined(TARGET_MIPS64)
179
/* DCLO: count leading ones in the full 64-bit arg1.  */
target_ulong helper_dclo (target_ulong arg1)
{
    return clo64(arg1);
}
183

    
184
/* DCLZ: count leading zeroes in the full 64-bit arg1.  */
target_ulong helper_dclz (target_ulong arg1)
{
    return clz64(arg1);
}
188
#endif /* TARGET_MIPS64 */
189

    
190
/* 64 bits arithmetic for 32 bits hosts */
191
/* Read the HI/LO accumulator pair as one 64-bit value
   (HI[0] in the upper 32 bits, LO[0] in the lower).  */
static inline uint64_t get_HILO (void)
{
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
}
195

    
196
/* Split a 64-bit value into the HI/LO pair, sign-extending each
   32-bit half (matters on 64-bit targets).  */
static inline void set_HILO (uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)HILO;
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}
201

    
202
/* Store a 64-bit result into HI/LO (LO = low word, HI = high word).
   NOTE(review): the original also assigned the HI word to the by-value
   parameter arg1; that store was dead (never visible to the caller) and
   has been removed.  Callers that need the HI half must compute or read
   it themselves.  The unused parameter is kept for interface stability. */
static inline void set_HIT0_LO (target_ulong arg1, uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}
207

    
208
/* Store a 64-bit result into HI/LO (LO = low word, HI = high word).
   NOTE(review): the original also assigned the LO word to the by-value
   parameter arg1; that store was dead (never visible to the caller) and
   has been removed.  Callers that need the LO half must compute or read
   it themselves.  The unused parameter is kept for interface stability. */
static inline void set_HI_LOT0 (target_ulong arg1, uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}
213

    
214
/* Multiplication variants of the vr54xx. */
215
/* VR54xx MULS: HI:LO = -(rs * rt) signed; rd gets the new LO half.
   Fix: the original returned arg1 unchanged because the by-value
   write-back inside set_HI_LOT0 never reached this function.  */
target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);

    set_HI_LOT0(arg1, tmp);
    return (target_ulong)(int32_t)tmp;
}
221

    
222
/* VR54xx MULSU: HI:LO = -(rs * rt) unsigned; rd gets the new LO half.
   Fix: return the computed LO instead of the stale input (the by-value
   write-back in set_HI_LOT0 was lost).  */
target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);

    set_HI_LOT0(arg1, tmp);
    return (target_ulong)(int32_t)tmp;
}
228

    
229
/* VR54xx MACC: HI:LO += rs * rt signed; rd gets the new LO half.
   Fix: return the computed LO instead of the stale input (the by-value
   write-back in set_HI_LOT0 was lost).  */
target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);

    set_HI_LOT0(arg1, tmp);
    return (target_ulong)(int32_t)tmp;
}
235

    
236
/* VR54xx MACCHI: HI:LO += rs * rt signed; rd gets the new HI half.
   Fix: return the computed HI instead of the stale input (the by-value
   write-back in set_HIT0_LO was lost).  */
target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);

    set_HIT0_LO(arg1, tmp);
    return (target_ulong)(int32_t)(tmp >> 32);
}
242

    
243
/* VR54xx MACCU: HI:LO += rs * rt unsigned; rd gets the new LO half.
   Fix: return the computed LO instead of the stale input (the by-value
   write-back in set_HI_LOT0 was lost).  */
target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);

    set_HI_LOT0(arg1, tmp);
    return (target_ulong)(int32_t)tmp;
}
249

    
250
/* VR54xx MACCHIU: HI:LO += rs * rt unsigned; rd gets the new HI half.
   Fix: return the computed HI instead of the stale input (the by-value
   write-back in set_HIT0_LO was lost).  */
target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);

    set_HIT0_LO(arg1, tmp);
    return (target_ulong)(int32_t)(tmp >> 32);
}
256

    
257
/* VR54xx MSAC: HI:LO -= rs * rt signed; rd gets the new LO half.
   Fix: return the computed LO instead of the stale input (the by-value
   write-back in set_HI_LOT0 was lost).  */
target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);

    set_HI_LOT0(arg1, tmp);
    return (target_ulong)(int32_t)tmp;
}
263

    
264
/* VR54xx MSACHI: HI:LO -= rs * rt signed; rd gets the new HI half.
   Fix: return the computed HI instead of the stale input (the by-value
   write-back in set_HIT0_LO was lost).  */
target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);

    set_HIT0_LO(arg1, tmp);
    return (target_ulong)(int32_t)(tmp >> 32);
}
270

    
271
/* VR54xx MSACU: HI:LO -= rs * rt unsigned; rd gets the new LO half.
   Fix: return the computed LO instead of the stale input (the by-value
   write-back in set_HI_LOT0 was lost).  */
target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);

    set_HI_LOT0(arg1, tmp);
    return (target_ulong)(int32_t)tmp;
}
277

    
278
/* VR54xx MSACHIU: HI:LO -= rs * rt unsigned; rd gets the new HI half.
   Fix: return the computed HI instead of the stale input (the by-value
   write-back in set_HIT0_LO was lost).  */
target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);

    set_HIT0_LO(arg1, tmp);
    return (target_ulong)(int32_t)(tmp >> 32);
}
284

    
285
/* VR54xx MULHI: HI:LO = rs * rt signed; rd gets the HI half.
   Fix: return the computed HI instead of the stale input (the by-value
   write-back in set_HIT0_LO was lost).  */
target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2;

    set_HIT0_LO(arg1, tmp);
    return (target_ulong)(int32_t)(tmp >> 32);
}
291

    
292
/* VR54xx MULHIU: HI:LO = rs * rt unsigned; rd gets the HI half.
   Fix: return the computed HI instead of the stale input (the by-value
   write-back in set_HIT0_LO was lost).  */
target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2;

    set_HIT0_LO(arg1, tmp);
    return (target_ulong)(int32_t)(tmp >> 32);
}
298

    
299
/* VR54xx MULSHI: HI:LO = -(rs * rt) signed; rd gets the HI half.
   Fix: return the computed HI instead of the stale input (the by-value
   write-back in set_HIT0_LO was lost).  */
target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);

    set_HIT0_LO(arg1, tmp);
    return (target_ulong)(int32_t)(tmp >> 32);
}
305

    
306
/* VR54xx MULSHIU: HI:LO = -(rs * rt) unsigned; rd gets the HI half.
   Fix: return the computed HI instead of the stale input (the by-value
   write-back in set_HIT0_LO was lost).  */
target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);

    set_HIT0_LO(arg1, tmp);
    return (target_ulong)(int32_t)(tmp >> 32);
}
312

    
313
#ifdef TARGET_MIPS64
314
/* DMULT: 128-bit signed product of two 64-bit operands into HI:LO.  */
void helper_dmult (target_ulong arg1, target_ulong arg2)
{
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}
318

    
319
/* DMULTU: 128-bit unsigned product of two 64-bit operands into HI:LO.  */
void helper_dmultu (target_ulong arg1, target_ulong arg2)
{
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}
323
#endif
324

    
325
#ifndef CONFIG_USER_ONLY
326

    
327
/* Translate a guest virtual address to a physical address for the
   LL/SC helpers.  On translation failure (-1 sentinel) the MMU code is
   presumed to have queued an exception, and we longjmp back to the CPU
   loop — cpu_loop_exit() does not return, so the missing return on that
   path is intentional.  */
static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
{
    target_phys_addr_t lladdr;

    lladdr = cpu_mips_translate_address(env, address, rw);

    if (lladdr == -1LL) {
        cpu_loop_exit(env);
    } else {
        return lladdr;
    }
}
339

    
340
/* Generate the LL/LLD helpers: record the physical address of the
   linked access in env->lladdr and the loaded value in env->llval so a
   later SC/SCD can verify the reservation.  */
#define HELPER_LD_ATOMIC(name, insn)                                          \
target_ulong helper_##name(target_ulong arg, int mem_idx)                     \
{                                                                             \
    env->lladdr = do_translate_address(arg, 0);                               \
    env->llval = do_##insn(arg, mem_idx);                                     \
    return env->llval;                                                        \
}
HELPER_LD_ATOMIC(ll, lw)
#ifdef TARGET_MIPS64
HELPER_LD_ATOMIC(lld, ld)
#endif
#undef HELPER_LD_ATOMIC
352

    
353
/* Generate the SC/SCD helpers.  An unaligned address raises AdES.  The
   store succeeds (returns 1) only if the physical address still matches
   the reservation in env->lladdr AND the memory still holds the value
   loaded by the matching LL (env->llval); otherwise returns 0.  */
#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
{                                                                             \
    target_long tmp;                                                          \
                                                                              \
    if (arg2 & almask) {                                                      \
        env->CP0_BadVAddr = arg2;                                             \
        helper_raise_exception(EXCP_AdES);                                    \
    }                                                                         \
    if (do_translate_address(arg2, 1) == env->lladdr) {                       \
        tmp = do_##ld_insn(arg2, mem_idx);                                    \
        if (tmp == env->llval) {                                              \
            do_##st_insn(arg2, arg1, mem_idx);                                \
            return 1;                                                         \
        }                                                                     \
    }                                                                         \
    return 0;                                                                 \
}
HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
#ifdef TARGET_MIPS64
HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
#endif
#undef HELPER_ST_ATOMIC
376
#endif
377

    
378
/* Byte-lane helpers for the unaligned word load/store helpers below.
   GET_LMASK gives the offset of the addressed byte within its word from
   the most-significant end; GET_OFFSET steps toward less-significant
   bytes.  Both flip direction with target endianness.  */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif
385

    
386
/* LWL: load word left.  Merge the bytes from the addressed byte up to
   the word boundary into the most-significant end of arg1, one byte at
   a time so a TLB fault is taken on the exact failing byte.  The result
   is sign-extended to target_ulong.  */
target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    target_ulong tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);

    if (GET_LMASK(arg2) <= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) <= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) == 0) {
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00) | tmp;
    }
    return (int32_t)arg1;
}
409

    
410
/* LWR: load word right.  Merge the bytes from the addressed byte down
   to the word boundary into the least-significant end of arg1, byte by
   byte for precise fault reporting.  Sign-extended result.  */
target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    target_ulong tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFF00) | tmp;

    if (GET_LMASK(arg2) >= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) >= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) == 3) {
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
    }
    return (int32_t)arg1;
}
433

    
434
/* SWL: store word left.  Write the most-significant bytes of arg1 to
   memory, starting at the addressed byte and stopping at the word
   boundary — i.e. 4 - GET_LMASK(arg2) bytes in total, byte by byte so
   faults hit the exact failing byte.  */
void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    int nbytes = 4 - (int)GET_LMASK(arg2);
    int i;

    for (i = 0; i < nbytes; i++) {
        do_sb(GET_OFFSET(arg2, i), (uint8_t)(arg1 >> (24 - 8 * i)), mem_idx);
    }
}
447

    
448
/* SWR: store word right.  Write the least-significant bytes of arg1 to
   memory, from the addressed byte down to the word boundary — i.e.
   GET_LMASK(arg2) + 1 bytes in total, byte by byte for precise faults. */
void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    int nbytes = (int)GET_LMASK(arg2) + 1;
    int i;

    for (i = 0; i < nbytes; i++) {
        do_sb(GET_OFFSET(arg2, -i), (uint8_t)(arg1 >> (8 * i)), mem_idx);
    }
}
461

    
462
#if defined(TARGET_MIPS64)
463
/* "half" load and stores.  We must do the memory access inline,
   or fault handling won't work.  */

/* 64-bit analogue of GET_LMASK: offset of the addressed byte within its
   doubleword, counted from the most-significant end (endian-flipped on
   little-endian targets).  */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif
471

    
472
/* LDL: 64-bit load doubleword left.  Merge bytes from the addressed
   byte up to the doubleword boundary into the most-significant end of
   arg1, one byte at a time for precise fault reporting.  */
target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    uint64_t tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);

    if (GET_LMASK64(arg2) <= 6) {
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) <= 5) {
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) <= 4) {
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) <= 3) {
        tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) <= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) <= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    }

    if (GET_LMASK64(arg2) == 0) {
        tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
    }

    return arg1;
}
516

    
517
/* LDR: 64-bit load doubleword right.  Merge bytes from the addressed
   byte down to the doubleword boundary into the least-significant end
   of arg1, one byte at a time for precise fault reporting.  */
target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    uint64_t tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;

    if (GET_LMASK64(arg2) >= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp  << 8);
    }

    if (GET_LMASK64(arg2) >= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) >= 3) {
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) >= 4) {
        tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) >= 5) {
        tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) >= 6) {
        tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) == 7) {
        tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
        arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
    }

    return arg1;
}
561

    
562
void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
563
{
564
    do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);
565

    
566
    if (GET_LMASK64(arg2) <= 6)
567
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
568

    
569
    if (GET_LMASK64(arg2) <= 5)
570
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
571

    
572
    if (GET_LMASK64(arg2) <= 4)
573
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
574

    
575
    if (GET_LMASK64(arg2) <= 3)
576
        do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
577

    
578
    if (GET_LMASK64(arg2) <= 2)
579
        do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
580

    
581
    if (GET_LMASK64(arg2) <= 1)
582
        do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
583

    
584
    if (GET_LMASK64(arg2) <= 0)
585
        do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
586
}
587

    
588
/* SDR: store doubleword right.  Write the least-significant bytes of
   arg1 from the addressed byte down to the doubleword boundary — i.e.
   GET_LMASK64(arg2) + 1 bytes, byte by byte for precise faults.  */
void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    int nbytes = (int)GET_LMASK64(arg2) + 1;
    int i;

    for (i = 0; i < nbytes; i++) {
        do_sb(GET_OFFSET(arg2, -i), (uint8_t)(arg1 >> (8 * i)), mem_idx);
    }
}
613
#endif /* TARGET_MIPS64 */
614

    
615
/* Register numbers touched by LWM/SWM/LDM/SDM: s0-s7 ($16-$23) and s8/fp
   ($30), in load/store order.  */
static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
616

    
617
/* microMIPS LWM: load up to (reglist & 0xf) registers from multiple_regs
   starting at addr (4 bytes each, sign-extended), then optionally $ra if
   bit 4 of reglist is set.  mem_idx selects kernel/supervisor/user access. */
void helper_lwm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
#define ldfun ldl_raw
#else
    uint32_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldl_kernel; break;
    case 1: ldfun = ldl_super; break;
    default:
    case 2: ldfun = ldl_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = (target_long) ldfun(addr);
            addr += 4;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = (target_long) ldfun(addr);
    }
}
649

    
650
/* microMIPS SWM: store up to (reglist & 0xf) registers from multiple_regs
   to addr (4 bytes each), then optionally $ra if bit 4 of reglist is set. */
void helper_swm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
#define stfun stl_raw
#else
    void (*stfun)(target_ulong, uint32_t);

    switch (mem_idx)
    {
    case 0: stfun = stl_kernel; break;
    case 1: stfun = stl_super; break;
     default:
    case 2: stfun = stl_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 4;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}
682

    
683
#if defined(TARGET_MIPS64)
684
/* microMIPS LDM (64-bit): like helper_lwm but loading 8-byte
   doublewords per register.  */
void helper_ldm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
#define ldfun ldq_raw
#else
    uint64_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldq_kernel; break;
    case 1: ldfun = ldq_super; break;
    default:
    case 2: ldfun = ldq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = ldfun(addr);
            addr += 8;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = ldfun(addr);
    }
}
716

    
717
/* microMIPS SDM (64-bit): like helper_swm but storing 8-byte
   doublewords per register.  */
void helper_sdm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
#define stfun stq_raw
#else
    void (*stfun)(target_ulong, uint64_t);

    switch (mem_idx)
    {
    case 0: stfun = stq_kernel; break;
    case 1: stfun = stq_super; break;
     default:
    case 2: stfun = stq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 8;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}
749
#endif
750

    
751
#ifndef CONFIG_USER_ONLY
752
/* SMP helpers.  */
753
/* Return non-zero if VPE c is halted but otherwise active, i.e. it is
   waiting for an interrupt (wfi) rather than shut off.  */
static int mips_vpe_is_wfi(CPUState *c)
{
    /* If the VPE is halted but otherwise active, it means it's waiting for
       an interrupt.  */
    return c->halted && mips_vpe_active(c);
}
759

    
760
/* Request that VPE c be woken via the CPU_INTERRUPT_WAKE mechanism.  */
static inline void mips_vpe_wake(CPUState *c)
{
    /* Dont set ->halted = 0 directly, let it be done via cpu_has_work
       because there might be other conditions that state that c should
       be sleeping.  */
    cpu_interrupt(c, CPU_INTERRUPT_WAKE);
}
767

    
768
/* Halt VPE c and cancel any pending wake request.  */
static inline void mips_vpe_sleep(CPUState *c)
{
    /* The VPE was shut off, really go to bed.
       Reset any old _WAKE requests.  */
    c->halted = 1;
    cpu_reset_interrupt(c, CPU_INTERRUPT_WAKE);
}
775

    
776
/* Wake the VPE hosting thread context tc, if the VPE is active and not
   merely waiting for an interrupt.  tc itself is currently unused.  */
static inline void mips_tc_wake(CPUState *c, int tc)
{
    /* FIXME: TC reschedule.  */
    if (mips_vpe_active(c) && !mips_vpe_is_wfi(c)) {
        mips_vpe_wake(c);
    }
}
783

    
784
/* Put the VPE hosting thread context tc to sleep if the VPE is no
   longer active.  tc itself is currently unused.  */
static inline void mips_tc_sleep(CPUState *c, int tc)
{
    /* FIXME: TC reschedule.  */
    if (!mips_vpe_active(c)) {
        mips_vpe_sleep(c);
    }
}
791

    
792
/* tc should point to an int with the value of the global TC index.
793
   This function will transform it into a local index within the
794
   returned CPUState.
795

796
   FIXME: This code assumes that all VPEs have the same number of TCs,
797
          which depends on runtime setup. Can probably be fixed by
798
          walking the list of CPUStates.  */
799
/* Map a global TC index (*tc) to the CPUState of its hosting VPE,
   rewriting *tc to the VPE-local TC index.  Without MVP privileges the
   current cpu/tc is returned instead.  Falls back to env if the
   computed VPE index has no CPU.  */
static CPUState *mips_cpu_map_tc(int *tc)
{
    CPUState *other;
    int vpe_idx, nr_threads = env->nr_threads;
    int tc_idx = *tc;

    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
        /* Not allowed to address other CPUs.  */
        *tc = env->current_tc;
        return env;
    }

    vpe_idx = tc_idx / nr_threads;
    *tc = tc_idx % nr_threads;
    other = qemu_get_cpu(vpe_idx);
    return other ? other : env;
}
816

    
817
/* The per VPE CP0_Status register shares some fields with the per TC
818
   CP0_TCStatus registers. These fields are wired to the same registers,
819
   so changes to either of them should be reflected on both registers.
820

821
   Also, EntryHi shares the bottom 8 bit ASID with TCStauts.
822

823
   These helper call synchronizes the regs for a given cpu.  */
824

    
825
/* Called for updates to CP0_Status.  */
826
static void sync_c0_status(CPUState *cpu, int tc)
827
{
828
    int32_t tcstatus, *tcst;
829
    uint32_t v = cpu->CP0_Status;
830
    uint32_t cu, mx, asid, ksu;
831
    uint32_t mask = ((1 << CP0TCSt_TCU3)
832
                       | (1 << CP0TCSt_TCU2)
833
                       | (1 << CP0TCSt_TCU1)
834
                       | (1 << CP0TCSt_TCU0)
835
                       | (1 << CP0TCSt_TMX)
836
                       | (3 << CP0TCSt_TKSU)
837
                       | (0xff << CP0TCSt_TASID));
838

    
839
    cu = (v >> CP0St_CU0) & 0xf;
840
    mx = (v >> CP0St_MX) & 0x1;
841
    ksu = (v >> CP0St_KSU) & 0x3;
842
    asid = env->CP0_EntryHi & 0xff;
843

    
844
    tcstatus = cu << CP0TCSt_TCU0;
845
    tcstatus |= mx << CP0TCSt_TMX;
846
    tcstatus |= ksu << CP0TCSt_TKSU;
847
    tcstatus |= asid;
848

    
849
    if (tc == cpu->current_tc) {
850
        tcst = &cpu->active_tc.CP0_TCStatus;
851
    } else {
852
        tcst = &cpu->tcs[tc].CP0_TCStatus;
853
    }
854

    
855
    *tcst &= ~mask;
856
    *tcst |= tcstatus;
857
    compute_hflags(cpu);
858
}
859

    
860
/* Called for updates to CP0_TCStatus.  */
861
static void sync_c0_tcstatus(CPUState *cpu, int tc, target_ulong v)
862
{
863
    uint32_t status;
864
    uint32_t tcu, tmx, tasid, tksu;
865
    uint32_t mask = ((1 << CP0St_CU3)
866
                       | (1 << CP0St_CU2)
867
                       | (1 << CP0St_CU1)
868
                       | (1 << CP0St_CU0)
869
                       | (1 << CP0St_MX)
870
                       | (3 << CP0St_KSU));
871

    
872
    tcu = (v >> CP0TCSt_TCU0) & 0xf;
873
    tmx = (v >> CP0TCSt_TMX) & 0x1;
874
    tasid = v & 0xff;
875
    tksu = (v >> CP0TCSt_TKSU) & 0x3;
876

    
877
    status = tcu << CP0St_CU0;
878
    status |= tmx << CP0St_MX;
879
    status |= tksu << CP0St_KSU;
880

    
881
    cpu->CP0_Status &= ~mask;
882
    cpu->CP0_Status |= status;
883

    
884
    /* Sync the TASID with EntryHi.  */
885
    cpu->CP0_EntryHi &= ~0xff;
886
    cpu->CP0_EntryHi = tasid;
887

    
888
    compute_hflags(cpu);
889
}
890

    
891
/* Called for updates to CP0_EntryHi.  Copy the 8-bit ASID from cpu's
   EntryHi into the TASID field (low 8 bits) of thread context tc's
   TCStatus.  */
static void sync_c0_entryhi(CPUState *cpu, int tc)
{
    int32_t *tcst;
    uint32_t asid, v = cpu->CP0_EntryHi;

    asid = v & 0xff;

    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    *tcst &= ~0xff;
    *tcst |= asid;
}
908

    
909
/* CP0 helpers */
910
target_ulong helper_mfc0_mvpcontrol (void)
911
{
912
    return env->mvp->CP0_MVPControl;
913
}
914

    
915
target_ulong helper_mfc0_mvpconf0 (void)
916
{
917
    return env->mvp->CP0_MVPConf0;
918
}
919

    
920
target_ulong helper_mfc0_mvpconf1 (void)
921
{
922
    return env->mvp->CP0_MVPConf1;
923
}
924

    
925
target_ulong helper_mfc0_random (void)
926
{
927
    return (int32_t)cpu_mips_get_random(env);
928
}
929

    
930
target_ulong helper_mfc0_tcstatus (void)
931
{
932
    return env->active_tc.CP0_TCStatus;
933
}
934

    
935
target_ulong helper_mftc0_tcstatus(void)
936
{
937
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
938
    CPUState *other = mips_cpu_map_tc(&other_tc);
939

    
940
    if (other_tc == other->current_tc)
941
        return other->active_tc.CP0_TCStatus;
942
    else
943
        return other->tcs[other_tc].CP0_TCStatus;
944
}
945

    
946
target_ulong helper_mfc0_tcbind (void)
947
{
948
    return env->active_tc.CP0_TCBind;
949
}
950

    
951
target_ulong helper_mftc0_tcbind(void)
952
{
953
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
954
    CPUState *other = mips_cpu_map_tc(&other_tc);
955

    
956
    if (other_tc == other->current_tc)
957
        return other->active_tc.CP0_TCBind;
958
    else
959
        return other->tcs[other_tc].CP0_TCBind;
960
}
961

    
962
target_ulong helper_mfc0_tcrestart (void)
963
{
964
    return env->active_tc.PC;
965
}
966

    
967
target_ulong helper_mftc0_tcrestart(void)
968
{
969
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
970
    CPUState *other = mips_cpu_map_tc(&other_tc);
971

    
972
    if (other_tc == other->current_tc)
973
        return other->active_tc.PC;
974
    else
975
        return other->tcs[other_tc].PC;
976
}
977

    
978
target_ulong helper_mfc0_tchalt (void)
979
{
980
    return env->active_tc.CP0_TCHalt;
981
}
982

    
983
target_ulong helper_mftc0_tchalt(void)
984
{
985
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
986
    CPUState *other = mips_cpu_map_tc(&other_tc);
987

    
988
    if (other_tc == other->current_tc)
989
        return other->active_tc.CP0_TCHalt;
990
    else
991
        return other->tcs[other_tc].CP0_TCHalt;
992
}
993

    
994
target_ulong helper_mfc0_tccontext (void)
995
{
996
    return env->active_tc.CP0_TCContext;
997
}
998

    
999
target_ulong helper_mftc0_tccontext(void)
1000
{
1001
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1002
    CPUState *other = mips_cpu_map_tc(&other_tc);
1003

    
1004
    if (other_tc == other->current_tc)
1005
        return other->active_tc.CP0_TCContext;
1006
    else
1007
        return other->tcs[other_tc].CP0_TCContext;
1008
}
1009

    
1010
target_ulong helper_mfc0_tcschedule (void)
1011
{
1012
    return env->active_tc.CP0_TCSchedule;
1013
}
1014

    
1015
target_ulong helper_mftc0_tcschedule(void)
1016
{
1017
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1018
    CPUState *other = mips_cpu_map_tc(&other_tc);
1019

    
1020
    if (other_tc == other->current_tc)
1021
        return other->active_tc.CP0_TCSchedule;
1022
    else
1023
        return other->tcs[other_tc].CP0_TCSchedule;
1024
}
1025

    
1026
target_ulong helper_mfc0_tcschefback (void)
1027
{
1028
    return env->active_tc.CP0_TCScheFBack;
1029
}
1030

    
1031
target_ulong helper_mftc0_tcschefback(void)
1032
{
1033
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1034
    CPUState *other = mips_cpu_map_tc(&other_tc);
1035

    
1036
    if (other_tc == other->current_tc)
1037
        return other->active_tc.CP0_TCScheFBack;
1038
    else
1039
        return other->tcs[other_tc].CP0_TCScheFBack;
1040
}
1041

    
1042
target_ulong helper_mfc0_count (void)
1043
{
1044
    return (int32_t)cpu_mips_get_count(env);
1045
}
1046

    
1047
target_ulong helper_mftc0_entryhi(void)
1048
{
1049
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1050
    CPUState *other = mips_cpu_map_tc(&other_tc);
1051

    
1052
    return other->CP0_EntryHi;
1053
}
1054

    
1055
target_ulong helper_mftc0_cause(void)
1056
{
1057
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1058
    int32_t tccause;
1059
    CPUState *other = mips_cpu_map_tc(&other_tc);
1060

    
1061
    if (other_tc == other->current_tc) {
1062
        tccause = other->CP0_Cause;
1063
    } else {
1064
        tccause = other->CP0_Cause;
1065
    }
1066

    
1067
    return tccause;
1068
}
1069

    
1070
target_ulong helper_mftc0_status(void)
1071
{
1072
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1073
    CPUState *other = mips_cpu_map_tc(&other_tc);
1074

    
1075
    return other->CP0_Status;
1076
}
1077

    
1078
target_ulong helper_mfc0_lladdr (void)
1079
{
1080
    return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
1081
}
1082

    
1083
target_ulong helper_mfc0_watchlo (uint32_t sel)
1084
{
1085
    return (int32_t)env->CP0_WatchLo[sel];
1086
}
1087

    
1088
target_ulong helper_mfc0_watchhi (uint32_t sel)
1089
{
1090
    return env->CP0_WatchHi[sel];
1091
}
1092

    
1093
target_ulong helper_mfc0_debug (void)
1094
{
1095
    target_ulong t0 = env->CP0_Debug;
1096
    if (env->hflags & MIPS_HFLAG_DM)
1097
        t0 |= 1 << CP0DB_DM;
1098

    
1099
    return t0;
1100
}
1101

    
1102
target_ulong helper_mftc0_debug(void)
1103
{
1104
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1105
    int32_t tcstatus;
1106
    CPUState *other = mips_cpu_map_tc(&other_tc);
1107

    
1108
    if (other_tc == other->current_tc)
1109
        tcstatus = other->active_tc.CP0_Debug_tcstatus;
1110
    else
1111
        tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
1112

    
1113
    /* XXX: Might be wrong, check with EJTAG spec. */
1114
    return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1115
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1116
}
1117

    
1118
#if defined(TARGET_MIPS64)
1119
target_ulong helper_dmfc0_tcrestart (void)
1120
{
1121
    return env->active_tc.PC;
1122
}
1123

    
1124
target_ulong helper_dmfc0_tchalt (void)
1125
{
1126
    return env->active_tc.CP0_TCHalt;
1127
}
1128

    
1129
target_ulong helper_dmfc0_tccontext (void)
1130
{
1131
    return env->active_tc.CP0_TCContext;
1132
}
1133

    
1134
target_ulong helper_dmfc0_tcschedule (void)
1135
{
1136
    return env->active_tc.CP0_TCSchedule;
1137
}
1138

    
1139
target_ulong helper_dmfc0_tcschefback (void)
1140
{
1141
    return env->active_tc.CP0_TCScheFBack;
1142
}
1143

    
1144
target_ulong helper_dmfc0_lladdr (void)
1145
{
1146
    return env->lladdr >> env->CP0_LLAddr_shift;
1147
}
1148

    
1149
target_ulong helper_dmfc0_watchlo (uint32_t sel)
1150
{
1151
    return env->CP0_WatchLo[sel];
1152
}
1153
#endif /* TARGET_MIPS64 */
1154

    
1155
/* MTC0 to Index.  Bit 31 (the probe-failure P bit) is preserved; the
   written value is masked down to a width sufficient to index every
   TLB entry.  */
void helper_mtc0_index (target_ulong arg1)
{
    unsigned int remaining = env->tlb->nb_tlb;
    int idx_limit = 1;

    /* idx_limit - 1 becomes an all-ones mask wide enough to hold any
       valid TLB index (one doubling per bit of nb_tlb).  */
    do {
        remaining >>= 1;
        idx_limit <<= 1;
    } while (remaining != 0);
    env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (idx_limit - 1));
}
1166

    
1167
void helper_mtc0_mvpcontrol (target_ulong arg1)
1168
{
1169
    uint32_t mask = 0;
1170
    uint32_t newval;
1171

    
1172
    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
1173
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
1174
                (1 << CP0MVPCo_EVP);
1175
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1176
        mask |= (1 << CP0MVPCo_STLB);
1177
    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
1178

    
1179
    // TODO: Enable/disable shared TLB, enable/disable VPEs.
1180

    
1181
    env->mvp->CP0_MVPControl = newval;
1182
}
1183

    
1184
void helper_mtc0_vpecontrol (target_ulong arg1)
1185
{
1186
    uint32_t mask;
1187
    uint32_t newval;
1188

    
1189
    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1190
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1191
    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
1192

    
1193
    /* Yield scheduler intercept not implemented. */
1194
    /* Gating storage scheduler intercept not implemented. */
1195

    
1196
    // TODO: Enable/disable TCs.
1197

    
1198
    env->CP0_VPEControl = newval;
1199
}
1200

    
1201
void helper_mttc0_vpecontrol(target_ulong arg1)
1202
{
1203
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1204
    CPUState *other = mips_cpu_map_tc(&other_tc);
1205
    uint32_t mask;
1206
    uint32_t newval;
1207

    
1208
    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1209
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1210
    newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
1211

    
1212
    /* TODO: Enable/disable TCs.  */
1213

    
1214
    other->CP0_VPEControl = newval;
1215
}
1216

    
1217
target_ulong helper_mftc0_vpecontrol(void)
1218
{
1219
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1220
    CPUState *other = mips_cpu_map_tc(&other_tc);
1221
    /* FIXME: Mask away return zero on read bits.  */
1222
    return other->CP0_VPEControl;
1223
}
1224

    
1225
target_ulong helper_mftc0_vpeconf0(void)
1226
{
1227
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1228
    CPUState *other = mips_cpu_map_tc(&other_tc);
1229

    
1230
    return other->CP0_VPEConf0;
1231
}
1232

    
1233
void helper_mtc0_vpeconf0 (target_ulong arg1)
1234
{
1235
    uint32_t mask = 0;
1236
    uint32_t newval;
1237

    
1238
    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
1239
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
1240
            mask |= (0xff << CP0VPEC0_XTC);
1241
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1242
    }
1243
    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1244

    
1245
    // TODO: TC exclusive handling due to ERL/EXL.
1246

    
1247
    env->CP0_VPEConf0 = newval;
1248
}
1249

    
1250
void helper_mttc0_vpeconf0(target_ulong arg1)
1251
{
1252
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1253
    CPUState *other = mips_cpu_map_tc(&other_tc);
1254
    uint32_t mask = 0;
1255
    uint32_t newval;
1256

    
1257
    mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1258
    newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1259

    
1260
    /* TODO: TC exclusive handling due to ERL/EXL.  */
1261
    other->CP0_VPEConf0 = newval;
1262
}
1263

    
1264
void helper_mtc0_vpeconf1 (target_ulong arg1)
1265
{
1266
    uint32_t mask = 0;
1267
    uint32_t newval;
1268

    
1269
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1270
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
1271
                (0xff << CP0VPEC1_NCP1);
1272
    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
1273

    
1274
    /* UDI not implemented. */
1275
    /* CP2 not implemented. */
1276

    
1277
    // TODO: Handle FPU (CP1) binding.
1278

    
1279
    env->CP0_VPEConf1 = newval;
1280
}
1281

    
1282
void helper_mtc0_yqmask (target_ulong arg1)
1283
{
1284
    /* Yield qualifier inputs not implemented. */
1285
    env->CP0_YQMask = 0x00000000;
1286
}
1287

    
1288
void helper_mtc0_vpeopt (target_ulong arg1)
1289
{
1290
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
1291
}
1292

    
1293
void helper_mtc0_entrylo0 (target_ulong arg1)
1294
{
1295
    /* Large physaddr (PABITS) not implemented */
1296
    /* 1k pages not implemented */
1297
    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
1298
}
1299

    
1300
void helper_mtc0_tcstatus (target_ulong arg1)
1301
{
1302
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
1303
    uint32_t newval;
1304

    
1305
    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
1306

    
1307
    env->active_tc.CP0_TCStatus = newval;
1308
    sync_c0_tcstatus(env, env->current_tc, newval);
1309
}
1310

    
1311
/* MTTC0 to TCStatus of the TC selected by VPEControl.TargTC, then
   resync the target VPE's Status/EntryHi from the new value.  */
void helper_mttc0_tcstatus (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    /* NOTE(review): unlike helper_mtc0_tcstatus, no
       CP0_TCStatus_rw_bitmask is applied here — confirm cross-TC
       writes are really meant to be unmasked.  */
    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCStatus = arg1;
    else
        other->tcs[other_tc].CP0_TCStatus = arg1;
    sync_c0_tcstatus(other, other_tc, arg1);
}
1322

    
1323
/* MTC0 to TCBind of the current TC.  Only TBE is normally writable;
   CurVPE additionally becomes writable while MVPControl.VPC allows
   VPE configuration.  */
void helper_mtc0_tcbind (target_ulong arg1)
{
    uint32_t writable = (1 << CP0TCBd_TBE);

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        writable |= (1 << CP0TCBd_CurVPE);
    }
    env->active_tc.CP0_TCBind =
        (env->active_tc.CP0_TCBind & ~writable) | (arg1 & writable);
}
1333

    
1334
void helper_mttc0_tcbind (target_ulong arg1)
1335
{
1336
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1337
    uint32_t mask = (1 << CP0TCBd_TBE);
1338
    uint32_t newval;
1339
    CPUState *other = mips_cpu_map_tc(&other_tc);
1340

    
1341
    if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1342
        mask |= (1 << CP0TCBd_CurVPE);
1343
    if (other_tc == other->current_tc) {
1344
        newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1345
        other->active_tc.CP0_TCBind = newval;
1346
    } else {
1347
        newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
1348
        other->tcs[other_tc].CP0_TCBind = newval;
1349
    }
1350
}
1351

    
1352
void helper_mtc0_tcrestart (target_ulong arg1)
1353
{
1354
    env->active_tc.PC = arg1;
1355
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1356
    env->lladdr = 0ULL;
1357
    /* MIPS16 not implemented. */
1358
}
1359

    
1360
void helper_mttc0_tcrestart (target_ulong arg1)
1361
{
1362
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1363
    CPUState *other = mips_cpu_map_tc(&other_tc);
1364

    
1365
    if (other_tc == other->current_tc) {
1366
        other->active_tc.PC = arg1;
1367
        other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1368
        other->lladdr = 0ULL;
1369
        /* MIPS16 not implemented. */
1370
    } else {
1371
        other->tcs[other_tc].PC = arg1;
1372
        other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1373
        other->lladdr = 0ULL;
1374
        /* MIPS16 not implemented. */
1375
    }
1376
}
1377

    
1378
/* MTC0 to TCHalt of the current TC: store the H bit, then put the TC
   to sleep or wake it accordingly.  */
void helper_mtc0_tchalt (target_ulong arg1)
{
    /* Only the H (halt) bit is writable.  */
    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    // TODO: Halt TC / Restart (if allocated+active) TC.
    if (env->active_tc.CP0_TCHalt & 1) {
        mips_tc_sleep(env, env->current_tc);
    } else {
        mips_tc_wake(env, env->current_tc);
    }
}
1389

    
1390
void helper_mttc0_tchalt (target_ulong arg1)
1391
{
1392
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1393
    CPUState *other = mips_cpu_map_tc(&other_tc);
1394

    
1395
    // TODO: Halt TC / Restart (if allocated+active) TC.
1396

    
1397
    if (other_tc == other->current_tc)
1398
        other->active_tc.CP0_TCHalt = arg1;
1399
    else
1400
        other->tcs[other_tc].CP0_TCHalt = arg1;
1401

    
1402
    if (arg1 & 1) {
1403
        mips_tc_sleep(other, other_tc);
1404
    } else {
1405
        mips_tc_wake(other, other_tc);
1406
    }
1407
}
1408

    
1409
void helper_mtc0_tccontext (target_ulong arg1)
1410
{
1411
    env->active_tc.CP0_TCContext = arg1;
1412
}
1413

    
1414
void helper_mttc0_tccontext (target_ulong arg1)
1415
{
1416
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1417
    CPUState *other = mips_cpu_map_tc(&other_tc);
1418

    
1419
    if (other_tc == other->current_tc)
1420
        other->active_tc.CP0_TCContext = arg1;
1421
    else
1422
        other->tcs[other_tc].CP0_TCContext = arg1;
1423
}
1424

    
1425
void helper_mtc0_tcschedule (target_ulong arg1)
1426
{
1427
    env->active_tc.CP0_TCSchedule = arg1;
1428
}
1429

    
1430
void helper_mttc0_tcschedule (target_ulong arg1)
1431
{
1432
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1433
    CPUState *other = mips_cpu_map_tc(&other_tc);
1434

    
1435
    if (other_tc == other->current_tc)
1436
        other->active_tc.CP0_TCSchedule = arg1;
1437
    else
1438
        other->tcs[other_tc].CP0_TCSchedule = arg1;
1439
}
1440

    
1441
void helper_mtc0_tcschefback (target_ulong arg1)
1442
{
1443
    env->active_tc.CP0_TCScheFBack = arg1;
1444
}
1445

    
1446
void helper_mttc0_tcschefback (target_ulong arg1)
1447
{
1448
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1449
    CPUState *other = mips_cpu_map_tc(&other_tc);
1450

    
1451
    if (other_tc == other->current_tc)
1452
        other->active_tc.CP0_TCScheFBack = arg1;
1453
    else
1454
        other->tcs[other_tc].CP0_TCScheFBack = arg1;
1455
}
1456

    
1457
void helper_mtc0_entrylo1 (target_ulong arg1)
1458
{
1459
    /* Large physaddr (PABITS) not implemented */
1460
    /* 1k pages not implemented */
1461
    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
1462
}
1463

    
1464
void helper_mtc0_context (target_ulong arg1)
1465
{
1466
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
1467
}
1468

    
1469
void helper_mtc0_pagemask (target_ulong arg1)
1470
{
1471
    /* 1k pages not implemented */
1472
    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
1473
}
1474

    
1475
void helper_mtc0_pagegrain (target_ulong arg1)
1476
{
1477
    /* SmartMIPS not implemented */
1478
    /* Large physaddr (PABITS) not implemented */
1479
    /* 1k pages not implemented */
1480
    env->CP0_PageGrain = 0;
1481
}
1482

    
1483
void helper_mtc0_wired (target_ulong arg1)
1484
{
1485
    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
1486
}
1487

    
1488
void helper_mtc0_srsconf0 (target_ulong arg1)
1489
{
1490
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
1491
}
1492

    
1493
void helper_mtc0_srsconf1 (target_ulong arg1)
1494
{
1495
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
1496
}
1497

    
1498
void helper_mtc0_srsconf2 (target_ulong arg1)
1499
{
1500
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
1501
}
1502

    
1503
void helper_mtc0_srsconf3 (target_ulong arg1)
1504
{
1505
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
1506
}
1507

    
1508
void helper_mtc0_srsconf4 (target_ulong arg1)
1509
{
1510
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
1511
}
1512

    
1513
void helper_mtc0_hwrena (target_ulong arg1)
1514
{
1515
    env->CP0_HWREna = arg1 & 0x0000000F;
1516
}
1517

    
1518
void helper_mtc0_count (target_ulong arg1)
1519
{
1520
    cpu_mips_store_count(env, arg1);
1521
}
1522

    
1523
/* MTC0 to EntryHi.  Keeps only the VPN2 bits and the 8-bit ASID,
   mirrors the ASID into the current TC when the MT ASE is present,
   and flushes qemu's TLB when the ASID changes.  */
void helper_mtc0_entryhi (target_ulong arg1)
{
    target_ulong old, val;

    /* 1k pages not implemented */
    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
#if defined(TARGET_MIPS64)
    /* Clamp the VPN to the implemented segment address space.  */
    val &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        /* MT ASE shadows the ASID in the TC's TCStatus.TASID.  */
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB.  */
    if ((old & 0xFF) != (val & 0xFF))
        cpu_mips_tlb_flush(env, 1);
}
1541

    
1542
void helper_mttc0_entryhi(target_ulong arg1)
1543
{
1544
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1545
    CPUState *other = mips_cpu_map_tc(&other_tc);
1546

    
1547
    other->CP0_EntryHi = arg1;
1548
    sync_c0_entryhi(other, other_tc);
1549
}
1550

    
1551
void helper_mtc0_compare (target_ulong arg1)
1552
{
1553
    cpu_mips_store_compare(env, arg1);
1554
}
1555

    
1556
/* MTC0 to Status.  Applies the CPU model's writable-bit mask, keeps
   the MT-ASE TCStatus shadow in sync (or just recomputes hflags), and
   optionally logs the transition.  */
void helper_mtc0_status (target_ulong arg1)
{
    uint32_t val, old;
    uint32_t mask = env->CP0_Status_rw_bitmask;

    /* Only the model's writable Status bits may change.  */
    val = arg1 & mask;
    old = env->CP0_Status;
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        /* sync_c0_status mirrors the new value into the current TC's
           TCStatus and ends by calling compute_hflags itself.  */
        sync_c0_status(env, env->current_tc);
    } else {
        compute_hflags(env);
    }

    /* Trace old/new Status, the interrupt lines enabled through
       Cause.IP under each value, and the resulting MMU mode.  */
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
                env->CP0_Cause);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}
1583

    
1584
void helper_mttc0_status(target_ulong arg1)
1585
{
1586
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1587
    CPUState *other = mips_cpu_map_tc(&other_tc);
1588

    
1589
    other->CP0_Status = arg1 & ~0xf1000018;
1590
    sync_c0_status(other, other_tc);
1591
}
1592

    
1593
void helper_mtc0_intctl (target_ulong arg1)
1594
{
1595
    /* vectored interrupts not implemented, no performance counters. */
1596
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
1597
}
1598

    
1599
void helper_mtc0_srsctl (target_ulong arg1)
1600
{
1601
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1602
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
1603
}
1604

    
1605
/* Common body for mtc0/mttc0 writes to Cause on @cpu: update the
   writable bits and apply the side effects (Count timer, soft IRQs).  */
static void mtc0_cause(CPUState *cpu, target_ulong arg1)
{
    /* Writable bits: 0x00C00300 — presumably IV/WP plus the software
       interrupt bits IP1:IP0; verify against the Cause layout.  */
    uint32_t mask = 0x00C00300;
    uint32_t old = cpu->CP0_Cause;
    int i;

    /* Release 2 additionally allows writing DC (disable Count).  */
    if (cpu->insn_flags & ISA_MIPS32R2) {
        mask |= 1 << CP0Ca_DC;
    }

    cpu->CP0_Cause = (cpu->CP0_Cause & ~mask) | (arg1 & mask);

    /* Stop or restart the Count timer when DC toggles.  */
    if ((old ^ cpu->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (cpu->CP0_Cause & (1 << CP0Ca_DC)) {
            cpu_mips_stop_count(cpu);
        } else {
            cpu_mips_start_count(cpu);
        }
    }

    /* Set/reset software interrupts */
    for (i = 0 ; i < 2 ; i++) {
        if ((old ^ cpu->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(cpu, i, cpu->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}
1632

    
1633
void helper_mtc0_cause(target_ulong arg1)
1634
{
1635
    mtc0_cause(env, arg1);
1636
}
1637

    
1638
void helper_mttc0_cause(target_ulong arg1)
1639
{
1640
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1641
    CPUState *other = mips_cpu_map_tc(&other_tc);
1642

    
1643
    mtc0_cause(other, arg1);
1644
}
1645

    
1646
target_ulong helper_mftc0_epc(void)
1647
{
1648
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1649
    CPUState *other = mips_cpu_map_tc(&other_tc);
1650

    
1651
    return other->CP0_EPC;
1652
}
1653

    
1654
target_ulong helper_mftc0_ebase(void)
1655
{
1656
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1657
    CPUState *other = mips_cpu_map_tc(&other_tc);
1658

    
1659
    return other->CP0_EBase;
1660
}
1661

    
1662
void helper_mtc0_ebase (target_ulong arg1)
1663
{
1664
    /* vectored interrupts not implemented */
1665
    env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
1666
}
1667

    
1668
void helper_mttc0_ebase(target_ulong arg1)
1669
{
1670
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1671
    CPUState *other = mips_cpu_map_tc(&other_tc);
1672
    other->CP0_EBase = (other->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
1673
}
1674

    
1675
target_ulong helper_mftc0_configx(target_ulong idx)
1676
{
1677
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1678
    CPUState *other = mips_cpu_map_tc(&other_tc);
1679

    
1680
    switch (idx) {
1681
    case 0: return other->CP0_Config0;
1682
    case 1: return other->CP0_Config1;
1683
    case 2: return other->CP0_Config2;
1684
    case 3: return other->CP0_Config3;
1685
    /* 4 and 5 are reserved.  */
1686
    case 6: return other->CP0_Config6;
1687
    case 7: return other->CP0_Config7;
1688
    default:
1689
        break;
1690
    }
1691
    return 0;
1692
}
1693

    
1694
void helper_mtc0_config0 (target_ulong arg1)
1695
{
1696
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1697
}
1698

    
1699
void helper_mtc0_config2 (target_ulong arg1)
1700
{
1701
    /* tertiary/secondary caches not implemented */
1702
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1703
}
1704

    
1705
void helper_mtc0_lladdr (target_ulong arg1)
1706
{
1707
    target_long mask = env->CP0_LLAddr_rw_bitmask;
1708
    arg1 = arg1 << env->CP0_LLAddr_shift;
1709
    env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
1710
}
1711

    
1712
void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
1713
{
1714
    /* Watch exceptions for instructions, data loads, data stores
1715
       not implemented. */
1716
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1717
}
1718

    
1719
void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
1720
{
1721
    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
1722
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
1723
}
1724

    
1725
void helper_mtc0_xcontext (target_ulong arg1)
1726
{
1727
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1728
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1729
}
1730

    
1731
void helper_mtc0_framemask (target_ulong arg1)
1732
{
1733
    env->CP0_Framemask = arg1; /* XXX */
1734
}
1735

    
1736
void helper_mtc0_debug (target_ulong arg1)
1737
{
1738
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1739
    if (arg1 & (1 << CP0DB_DM))
1740
        env->hflags |= MIPS_HFLAG_DM;
1741
    else
1742
        env->hflags &= ~MIPS_HFLAG_DM;
1743
}
1744

    
1745
void helper_mttc0_debug(target_ulong arg1)
1746
{
1747
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1748
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1749
    CPUState *other = mips_cpu_map_tc(&other_tc);
1750

    
1751
    /* XXX: Might be wrong, check with EJTAG spec. */
1752
    if (other_tc == other->current_tc)
1753
        other->active_tc.CP0_Debug_tcstatus = val;
1754
    else
1755
        other->tcs[other_tc].CP0_Debug_tcstatus = val;
1756
    other->CP0_Debug = (other->CP0_Debug &
1757
                     ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1758
                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1759
}
1760

    
1761
void helper_mtc0_performance0 (target_ulong arg1)
1762
{
1763
    env->CP0_Performance0 = arg1 & 0x000007ff;
1764
}
1765

    
1766
void helper_mtc0_taglo (target_ulong arg1)
1767
{
1768
    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1769
}
1770

    
1771
void helper_mtc0_datalo (target_ulong arg1)
1772
{
1773
    env->CP0_DataLo = arg1; /* XXX */
1774
}
1775

    
1776
void helper_mtc0_taghi (target_ulong arg1)
1777
{
1778
    env->CP0_TagHi = arg1; /* XXX */
1779
}
1780

    
1781
void helper_mtc0_datahi (target_ulong arg1)
1782
{
1783
    env->CP0_DataHi = arg1; /* XXX */
1784
}
1785

    
1786
/* MIPS MT functions */
1787
target_ulong helper_mftgpr(uint32_t sel)
1788
{
1789
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1790
    CPUState *other = mips_cpu_map_tc(&other_tc);
1791

    
1792
    if (other_tc == other->current_tc)
1793
        return other->active_tc.gpr[sel];
1794
    else
1795
        return other->tcs[other_tc].gpr[sel];
1796
}
1797

    
1798
target_ulong helper_mftlo(uint32_t sel)
1799
{
1800
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1801
    CPUState *other = mips_cpu_map_tc(&other_tc);
1802

    
1803
    if (other_tc == other->current_tc)
1804
        return other->active_tc.LO[sel];
1805
    else
1806
        return other->tcs[other_tc].LO[sel];
1807
}
1808

    
1809
target_ulong helper_mfthi(uint32_t sel)
1810
{
1811
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1812
    CPUState *other = mips_cpu_map_tc(&other_tc);
1813

    
1814
    if (other_tc == other->current_tc)
1815
        return other->active_tc.HI[sel];
1816
    else
1817
        return other->tcs[other_tc].HI[sel];
1818
}
1819

    
1820
target_ulong helper_mftacx(uint32_t sel)
1821
{
1822
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1823
    CPUState *other = mips_cpu_map_tc(&other_tc);
1824

    
1825
    if (other_tc == other->current_tc)
1826
        return other->active_tc.ACX[sel];
1827
    else
1828
        return other->tcs[other_tc].ACX[sel];
1829
}
1830

    
1831
target_ulong helper_mftdsp(void)
1832
{
1833
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1834
    CPUState *other = mips_cpu_map_tc(&other_tc);
1835

    
1836
    if (other_tc == other->current_tc)
1837
        return other->active_tc.DSPControl;
1838
    else
1839
        return other->tcs[other_tc].DSPControl;
1840
}
1841

    
1842
void helper_mttgpr(target_ulong arg1, uint32_t sel)
1843
{
1844
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1845
    CPUState *other = mips_cpu_map_tc(&other_tc);
1846

    
1847
    if (other_tc == other->current_tc)
1848
        other->active_tc.gpr[sel] = arg1;
1849
    else
1850
        other->tcs[other_tc].gpr[sel] = arg1;
1851
}
1852

    
1853
void helper_mttlo(target_ulong arg1, uint32_t sel)
1854
{
1855
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1856
    CPUState *other = mips_cpu_map_tc(&other_tc);
1857

    
1858
    if (other_tc == other->current_tc)
1859
        other->active_tc.LO[sel] = arg1;
1860
    else
1861
        other->tcs[other_tc].LO[sel] = arg1;
1862
}
1863

    
1864
void helper_mtthi(target_ulong arg1, uint32_t sel)
1865
{
1866
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1867
    CPUState *other = mips_cpu_map_tc(&other_tc);
1868

    
1869
    if (other_tc == other->current_tc)
1870
        other->active_tc.HI[sel] = arg1;
1871
    else
1872
        other->tcs[other_tc].HI[sel] = arg1;
1873
}
1874

    
1875
void helper_mttacx(target_ulong arg1, uint32_t sel)
1876
{
1877
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1878
    CPUState *other = mips_cpu_map_tc(&other_tc);
1879

    
1880
    if (other_tc == other->current_tc)
1881
        other->active_tc.ACX[sel] = arg1;
1882
    else
1883
        other->tcs[other_tc].ACX[sel] = arg1;
1884
}
1885

    
1886
/* MTTDSP: write the DSPControl register of the targeted TC. */
void helper_mttdsp(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc != other->current_tc) {
        other->tcs[other_tc].DSPControl = arg1;
    } else {
        other->active_tc.DSPControl = arg1;
    }
}
1896

    
1897
/* MIPS MT functions */
1898
/* DMT (disable multi-threading): unimplemented stub; always returns 0
   instead of the previous enable state. */
target_ulong helper_dmt(void)
{
    // TODO
     return 0;
}

/* EMT (enable multi-threading): unimplemented stub; always returns 0. */
target_ulong helper_emt(void)
{
    // TODO
    return 0;
}
1909

    
1910
/* DVPE: disable every VPE except the one executing the instruction
   (clear its EVP bit and put it to sleep), walking the global CPU list.
   Returns the MVPControl value from before the change. */
target_ulong helper_dvpe(void)
{
    CPUState *other_cpu = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    do {
        /* Turn off all VPEs except the one executing the dvpe.  */
        if (other_cpu != env) {
            other_cpu->mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
            mips_vpe_sleep(other_cpu);
        }
        other_cpu = other_cpu->next_cpu;
    } while (other_cpu);
    return prev;
}
1925

    
1926
/* EVPE: re-enable all other VPEs (set EVP and wake them), leaving any
   VPE that is in WFI asleep.  Returns the MVPControl value from before
   the change. */
target_ulong helper_evpe(void)
{
    CPUState *other_cpu = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    do {
        if (other_cpu != env
           /* If the VPE is WFI, don't disturb its sleep.  */
           && !mips_vpe_is_wfi(other_cpu)) {
            /* Enable the VPE.  */
            other_cpu->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
            mips_vpe_wake(other_cpu); /* And wake it up.  */
        }
        other_cpu = other_cpu->next_cpu;
    } while (other_cpu);
    return prev;
}
1943
#endif /* !CONFIG_USER_ONLY */
1944

    
1945
/* FORK: unimplemented stub.  Should allocate a free TC and store
   arg2 (rs) into register rt of the new TC; currently does nothing
   (the assignment to arg1 is dead). */
void helper_fork(target_ulong arg1, target_ulong arg2)
{
    // arg1 = rt, arg2 = rs
    arg1 = 0;
    // TODO: store to TC register
}
1951

    
1952
/* YIELD: partial implementation of the MT thread-scheduling hint.
 * arg < 0 (other than -2): raises a Thread exception with EXCPT
 *   sub-code 4 when scheduler intercept (YSI) is enabled and the TC has
 *   the DT bit set; no actual scheduling policy is modelled.
 * arg == 0: TC deallocation, unimplemented (underflow branch is dead).
 * arg > 0: qualifier inputs are not modelled, so always raises a
 *   Thread exception with EXCPT sub-code 2.
 * Returns CP0_YQMask.
 */
target_ulong helper_yield(target_ulong arg)
{
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                helper_raise_exception(EXCP_THREAD);
            }
        }
    } else if (arg1 == 0) {
        if (0 /* TODO: TC underflow */) {
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            helper_raise_exception(EXCP_THREAD);
        } else {
            // TODO: Deallocate TC
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        helper_raise_exception(EXCP_THREAD);
    }
    return env->CP0_YQMask;
}
1981

    
1982
#ifndef CONFIG_USER_ONLY
1983
/* TLB management */
1984
/* Flush qemu's software TLB and forget every shadow copy of a guest
   TLB entry (resetting tlb_in_use back to the architected count). */
static void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush (env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}
1990

    
1991
/* Drop all shadow TLB entries with index >= first, invalidating each
   one as it is removed. */
static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
{
    while (env->tlb->tlb_in_use > first) {
        env->tlb->tlb_in_use--;
        r4k_invalidate_tlb(env, env->tlb->tlb_in_use, 0);
    }
}
1998

    
1999
/* Copy the current CP0 EntryHi/PageMask/EntryLo0/EntryLo1 contents
   into soft-TLB entry 'idx'. */
static void r4k_fill_tlb (int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    /* Entry is global only when both EntryLo halves have G set.  */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    /* Per-half valid (V), dirty (D), cache attribute (C) and frame
       number (PFN) fields, unpacked from EntryLo0/EntryLo1.  */
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}
2021

    
2022
/* TLBWI: write the soft-TLB entry selected by CP0_Index (probe-failure
   bit masked off, index wrapped to the implemented TLB size). */
void r4k_helper_tlbwi (void)
{
    int idx;

    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;

    /* Discard cached TLB entries.  We could avoid doing this if the
       tlbwi is just upgrading access permissions on the current entry;
       that might be a further win.  */
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(idx);
}
2036

    
2037
void r4k_helper_tlbwr (void)
2038
{
2039
    int r = cpu_mips_get_random(env);
2040

    
2041
    r4k_invalidate_tlb(env, r, 1);
2042
    r4k_fill_tlb(r);
2043
}
2044

    
2045
void r4k_helper_tlbp (void)
2046
{
2047
    r4k_tlb_t *tlb;
2048
    target_ulong mask;
2049
    target_ulong tag;
2050
    target_ulong VPN;
2051
    uint8_t ASID;
2052
    int i;
2053

    
2054
    ASID = env->CP0_EntryHi & 0xFF;
2055
    for (i = 0; i < env->tlb->nb_tlb; i++) {
2056
        tlb = &env->tlb->mmu.r4k.tlb[i];
2057
        /* 1k pages are not supported. */
2058
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
2059
        tag = env->CP0_EntryHi & ~mask;
2060
        VPN = tlb->VPN & ~mask;
2061
        /* Check ASID, virtual page number & size */
2062
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
2063
            /* TLB match */
2064
            env->CP0_Index = i;
2065
            break;
2066
        }
2067
    }
2068
    if (i == env->tlb->nb_tlb) {
2069
        /* No match.  Discard any shadow entries, if any of them match.  */
2070
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
2071
            tlb = &env->tlb->mmu.r4k.tlb[i];
2072
            /* 1k pages are not supported. */
2073
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
2074
            tag = env->CP0_EntryHi & ~mask;
2075
            VPN = tlb->VPN & ~mask;
2076
            /* Check ASID, virtual page number & size */
2077
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
2078
                r4k_mips_tlb_flush_extra (env, i);
2079
                break;
2080
            }
2081
        }
2082

    
2083
        env->CP0_Index |= 0x80000000;
2084
    }
2085
}
2086

    
2087
/* TLBR: read the soft-TLB entry selected by CP0_Index back into
   EntryHi/PageMask/EntryLo0/EntryLo1, repacking the per-half
   G/V/D/C/PFN fields. */
void r4k_helper_tlbr (void)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;
    int idx;

    ASID = env->CP0_EntryHi & 0xFF;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    /* If this will change the current ASID, flush qemu's TLB.  */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    /* Shadow copies may no longer be consistent; drop them.  */
    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}
2110

    
2111
/* Thin dispatchers: forward the TLB instructions to the function
   pointers installed for the active MMU model in env->tlb. */
void helper_tlbwi(void)
{
    env->tlb->helper_tlbwi();
}

void helper_tlbwr(void)
{
    env->tlb->helper_tlbwr();
}

void helper_tlbp(void)
{
    env->tlb->helper_tlbp();
}

void helper_tlbr(void)
{
    env->tlb->helper_tlbr();
}
2130

    
2131
/* Specials */
2132
target_ulong helper_di (void)
2133
{
2134
    target_ulong t0 = env->CP0_Status;
2135

    
2136
    env->CP0_Status = t0 & ~(1 << CP0St_IE);
2137
    return t0;
2138
}
2139

    
2140
target_ulong helper_ei (void)
2141
{
2142
    target_ulong t0 = env->CP0_Status;
2143

    
2144
    env->CP0_Status = t0 | (1 << CP0St_IE);
2145
    return t0;
2146
}
2147

    
2148
static void debug_pre_eret (void)
2149
{
2150
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2151
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2152
                env->active_tc.PC, env->CP0_EPC);
2153
        if (env->CP0_Status & (1 << CP0St_ERL))
2154
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2155
        if (env->hflags & MIPS_HFLAG_DM)
2156
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2157
        qemu_log("\n");
2158
    }
2159
}
2160

    
2161
/* Log the CPU state after an ERET/DERET, including the resulting MMU
   mode (user/supervisor/kernel) derived from the KSU hflags. */
static void debug_post_eret (void)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}
2178

    
2179
/* Jump to error_pc.  Bit 0 of the target address selects the 16-bit
   ISA mode (MIPS_HFLAG_M16) and is stripped from the PC itself. */
static void set_pc (target_ulong error_pc)
{
    env->hflags &= ~(MIPS_HFLAG_M16);
    if (error_pc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    }
    env->active_tc.PC = error_pc & ~(target_ulong)1;
}
2188

    
2189
/* ERET: return from exception.  Jumps to ErrorEPC and clears
   Status.ERL when in error level, otherwise jumps to EPC and clears
   Status.EXL; recomputes the hflags and invalidates any pending LL/SC
   link (lladdr = 1 can never match a real load-linked address). */
void helper_eret (void)
{
    debug_pre_eret();
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        set_pc(env->CP0_ErrorEPC);
        env->CP0_Status &= ~(1 << CP0St_ERL);
    } else {
        set_pc(env->CP0_EPC);
        env->CP0_Status &= ~(1 << CP0St_EXL);
    }
    compute_hflags(env);
    debug_post_eret();
    env->lladdr = 1;
}
2203

    
2204
void helper_deret (void)
2205
{
2206
    debug_pre_eret();
2207
    set_pc(env->CP0_DEPC);
2208

    
2209
    env->hflags &= MIPS_HFLAG_DM;
2210
    compute_hflags(env);
2211
    debug_post_eret();
2212
    env->lladdr = 1;
2213
}
2214
#endif /* !CONFIG_USER_ONLY */
2215

    
2216
/* RDHWR helpers: each hardware register is readable when running with
   CP0 access (kernel/debug) or when the corresponding CP0_HWREna bit
   is set; otherwise a Reserved Instruction exception is raised.  The
   trailing "return 0" after the exception is for the compiler only -
   helper_raise_exception presumably does not return (TODO confirm). */

/* HWR 0: CPUNum, the low 10 bits of EBase. */
target_ulong helper_rdhwr_cpunum(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 0)))
        return env->CP0_EBase & 0x3ff;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

/* HWR 1: SYNCI cache-line step. */
target_ulong helper_rdhwr_synci_step(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 1)))
        return env->SYNCI_Step;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

/* HWR 2: the CP0 cycle counter (Count). */
target_ulong helper_rdhwr_cc(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 2)))
        return env->CP0_Count;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

/* HWR 3: counter resolution (cycles per Count tick). */
target_ulong helper_rdhwr_ccres(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 3)))
        return env->CCRes;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}
2259

    
2260
/* Minimal PMON monitor-call emulation.  The function code arrives
   doubled and is halved here; only console stubs are handled:
   2/11 = inbyte (always reports "no character", -1), 3/12 = outbyte of
   gpr[4], 158 = print string.  NOTE(review): case 158 treats the guest
   address in gpr[4] as a host pointer, which is only meaningful for
   user-mode emulation - confirm. */
void helper_pmon (int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0)
            env->active_tc.gpr[2] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}
2285

    
2286
/* WAIT: halt the CPU until the next interrupt, implemented by marking
   the CPU halted and raising the internal EXCP_HLT exception. */
void helper_wait (void)
{
    env->halted = 1;
    cpu_reset_interrupt(env, CPU_INTERRUPT_WAKE);
    helper_raise_exception(EXCP_HLT);
}
2292

    
2293
#if !defined(CONFIG_USER_ONLY)
2294

    
2295
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
2296

    
2297
#define MMUSUFFIX _mmu
2298
#define ALIGNED_ONLY
2299

    
2300
#define SHIFT 0
2301
#include "softmmu_template.h"
2302

    
2303
#define SHIFT 1
2304
#include "softmmu_template.h"
2305

    
2306
#define SHIFT 2
2307
#include "softmmu_template.h"
2308

    
2309
#define SHIFT 3
2310
#include "softmmu_template.h"
2311

    
2312
/* Called by the softmmu templates on an unaligned access: record the
   faulting address in BadVAddr, restore the guest CPU state from the
   host return address, and raise an address-error exception (AdES for
   stores, AdEL for loads). */
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (retaddr);
    helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}
2318

    
2319
/* Softmmu TLB-miss handler.  retaddr != NULL means we were called from
   generated code, in which case the guest CPU state is first restored
   from the host PC before the MMU exception is (re)raised; with
   retaddr == NULL the exception is raised with the state as-is.  The
   helpers operate on the global env, so it is swapped in around the
   call and restored on the non-faulting path (the exception path
   longjmps out of this function). */
void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
              void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    saved_env = env;
    env = env1;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
2345

    
2346
/* Called on accesses to unassigned physical addresses: raise a bus
   error - instruction (IBE) for fetches, data (DBE) otherwise.
   NOTE(review): the global env is set but never restored here, unlike
   tlb_fill; this relies on helper_raise_exception never returning -
   confirm. */
void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr,
                           int is_write, int is_exec, int unused, int size)
{
    env = env1;

    if (is_exec)
        helper_raise_exception(EXCP_IBE);
    else
        helper_raise_exception(EXCP_DBE);
}
2356
#endif /* !CONFIG_USER_ONLY */
2357

    
2358
/* Complex FPU operations which may need stack space. */
2359

    
2360
#define FLOAT_ONE32 make_float32(0x3f8 << 20)
2361
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2362
#define FLOAT_TWO32 make_float32(1 << 30)
2363
#define FLOAT_TWO64 make_float64(1ULL << 62)
2364
#define FLOAT_QNAN32 0x7fbfffff
2365
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2366
#define FLOAT_SNAN32 0x7fffffff
2367
#define FLOAT_SNAN64 0x7fffffffffffffffULL
2368

    
2369
/* convert MIPS rounding mode in FCR31 to IEEE library */
2370
static unsigned int ieee_rm[] = {
2371
    float_round_nearest_even,
2372
    float_round_to_zero,
2373
    float_round_up,
2374
    float_round_down
2375
};
2376

    
2377
#define RESTORE_ROUNDING_MODE \
2378
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2379

    
2380
#define RESTORE_FLUSH_MODE \
2381
    set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
2382

    
2383
/* CFC1: read an FPU control register.  reg 0 is FIR (fcr0); regs 25,
   26 and 28 are views assembled from bits of fcr31 (per the MIPS FPU
   control-register map these correspond to FCCR, FEXR and FENR);
   anything else returns the full FCSR (fcr31). */
target_ulong helper_cfc1 (uint32_t reg)
{
    target_ulong arg1;

    switch (reg) {
    case 0:
        arg1 = (int32_t)env->active_fpu.fcr0;
        break;
    case 25:
        /* Condition codes: CC7..CC1 from bits 31..25, CC0 from bit 23. */
        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
        break;
    case 26:
        /* Cause and flag fields only. */
        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
        break;
    case 28:
        /* Enables + rounding mode, with the FS bit moved down to bit 2. */
        arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
        break;
    default:
        arg1 = (int32_t)env->active_fpu.fcr31;
        break;
    }

    return arg1;
}
2407

    
2408
/* CTC1: write an FPU control register.  Writes with bits set outside
   the writable mask are silently ignored; regs 25/26/28 update only
   their slice of fcr31.  After a successful write the softfloat
   rounding and flush-to-zero modes are re-synchronised and a pending
   enabled FP exception (the unimplemented-op cause bit 0x20 is always
   treated as enabled) is raised immediately. */
void helper_ctc1 (target_ulong arg1, uint32_t reg)
{
    switch(reg) {
    case 25:
        if (arg1 & 0xffffff00)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
                     ((arg1 & 0x1) << 23);
        break;
    case 26:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
        break;
    case 28:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
                     ((arg1 & 0x4) << 22);
        break;
    case 31:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = arg1;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    /* set flush-to-zero mode */
    RESTORE_FLUSH_MODE;
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
        helper_raise_exception(EXCP_FPE);
}
2444

    
2445
static inline int ieee_ex_to_mips(int xcpt)
2446
{
2447
    int ret = 0;
2448
    if (xcpt) {
2449
        if (xcpt & float_flag_invalid) {
2450
            ret |= FP_INVALID;
2451
        }
2452
        if (xcpt & float_flag_overflow) {
2453
            ret |= FP_OVERFLOW;
2454
        }
2455
        if (xcpt & float_flag_underflow) {
2456
            ret |= FP_UNDERFLOW;
2457
        }
2458
        if (xcpt & float_flag_divbyzero) {
2459
            ret |= FP_DIV0;
2460
        }
2461
        if (xcpt & float_flag_inexact) {
2462
            ret |= FP_INEXACT;
2463
        }
2464
    }
2465
    return ret;
2466
}
2467

    
2468
/* Fold softfloat's accumulated exception flags into FCSR: the cause
   field is always overwritten; if any raised exception is enabled an
   FP exception is taken, otherwise the bits accumulate into the sticky
   flag field. */
static inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));

    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
        helper_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
}
2478

    
2479
/* Float support.
2480
   Single precition routines have a "s" suffix, double precision a
2481
   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2482
   paired single lower "pl", paired single upper "pu".  */
2483

    
2484
/* unary operations, modifying fp status  */
2485
/* sqrt.d / sqrt.s: IEEE square root.  NOTE(review): unlike the
   conversion helpers below these neither clear the softfloat flags nor
   call update_fcr31 - confirm this is intentional. */
uint64_t helper_float_sqrt_d(uint64_t fdt0)
{
    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
}

uint32_t helper_float_sqrt_s(uint32_t fst0)
{
    return float32_sqrt(fst0, &env->active_fpu.fp_status);
}
2494

    
2495
/* cvt.d.s / cvt.d.w / cvt.d.l: convert single, 32-bit integer or
   64-bit integer to double; flags are cleared first and folded into
   FCSR afterwards. */
uint64_t helper_float_cvtd_s(uint32_t fst0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t helper_float_cvtd_w(uint32_t wt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t helper_float_cvtd_l(uint64_t dt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}
2524

    
2525
/* cvt.l.d / cvt.l.s: convert to 64-bit integer using the current
   rounding mode; on overflow or invalid the MIPS-defined default
   result (here the SNAN bit pattern) is returned. */
uint64_t helper_float_cvtl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_cvtl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}
2548

    
2549
/* cvt.ps.pw: convert a pair of 32-bit integers (low/high halves of
   dt0) to a paired-single value, low single in the low word. */
uint64_t helper_float_cvtps_pw(uint64_t dt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

/* cvt.pw.ps: convert each half of a paired-single to a 32-bit integer;
   on overflow/invalid both halves get the default (SNAN) pattern. */
uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
{
    uint32_t wt2;
    uint32_t wth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        wt2 = FLOAT_SNAN32;
        wth2 = FLOAT_SNAN32;
    }
    return ((uint64_t)wth2 << 32) | wt2;
}
2576

    
2577
/* cvt.s.d / cvt.s.w / cvt.s.l: convert double, 32-bit integer or
   64-bit integer to single precision. */
uint32_t helper_float_cvts_d(uint64_t fdt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t helper_float_cvts_w(uint32_t wt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t helper_float_cvts_l(uint64_t dt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}
2606

    
2607
/* cvt.s.pl / cvt.s.pu: extract the lower/upper single of a paired
   single.  These are pure bit moves; the flag clear + update_fcr31
   sequence leaves the FCSR cause field cleared. */
uint32_t helper_float_cvts_pl(uint32_t wt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wt0;
    update_fcr31();
    return wt2;
}

uint32_t helper_float_cvts_pu(uint32_t wth0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wth0;
    update_fcr31();
    return wt2;
}
2626

    
2627
/* cvt.w.s / cvt.w.d: convert to 32-bit integer using the current
   rounding mode; SNAN pattern on overflow/invalid. */
uint32_t helper_float_cvtw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_cvtw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
2650

    
2651
/* round.l.d / round.l.s / round.w.d / round.w.s: convert to integer
   with round-to-nearest-even forced for the conversion, then the
   FCR31-selected rounding mode is restored.  SNAN pattern on
   overflow/invalid. */
uint64_t helper_float_roundl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_roundl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_roundw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_roundw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
2706

    
2707
/* trunc.l.d / trunc.l.s / trunc.w.d / trunc.w.s: convert to integer
   with truncation (round toward zero), via the dedicated softfloat
   _round_to_zero entry points; SNAN pattern on overflow/invalid. */
uint64_t helper_float_truncl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_truncl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_truncw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_truncw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
2754

    
2755
/* ceil.l.d / ceil.l.s / ceil.w.d / ceil.w.s: convert to integer with
   rounding forced upward, then restore the FCR31-selected mode; SNAN
   pattern on overflow/invalid. */
uint64_t helper_float_ceill_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_ceill_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_ceilw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_ceilw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
2810

    
2811
/* floor.l.d / floor.l.s / floor.w.d / floor.w.s: convert to integer
   with rounding forced downward, then restore the FCR31-selected mode;
   SNAN pattern on overflow/invalid. */
uint64_t helper_float_floorl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_floorl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_floorw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_floorw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
2866

    
2867
/* Unary operations that only manipulate the sign/magnitude bits and
   therefore never touch the FP status (abs, chs).  The _ps variant
   applies the operation to both 32-bit halves of the pair.  */
#define FLOAT_UNOP(name)                                        \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)             \
{                                                               \
    return float64_ ## name(fdt0);                              \
}                                                               \
uint32_t helper_float_ ## name ## _s(uint32_t fst0)             \
{                                                               \
    return float32_ ## name(fst0);                              \
}                                                               \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)            \
{                                                               \
    uint32_t lo = float32_ ## name(fdt0 & 0XFFFFFFFF);          \
    uint32_t hi = float32_ ## name(fdt0 >> 32);                 \
                                                                \
    return ((uint64_t)hi << 32) | lo;                           \
}
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP
2889

    
2890
/* MIPS specific unary operations */
2891
uint64_t helper_float_recip_d(uint64_t fdt0)
2892
{
2893
    uint64_t fdt2;
2894

    
2895
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2896
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2897
    update_fcr31();
2898
    return fdt2;
2899
}
2900

    
2901
uint32_t helper_float_recip_s(uint32_t fst0)
2902
{
2903
    uint32_t fst2;
2904

    
2905
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2906
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2907
    update_fcr31();
2908
    return fst2;
2909
}
2910

    
2911
uint64_t helper_float_rsqrt_d(uint64_t fdt0)
2912
{
2913
    uint64_t fdt2;
2914

    
2915
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2916
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2917
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2918
    update_fcr31();
2919
    return fdt2;
2920
}
2921

    
2922
uint32_t helper_float_rsqrt_s(uint32_t fst0)
2923
{
2924
    uint32_t fst2;
2925

    
2926
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2927
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2928
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2929
    update_fcr31();
2930
    return fst2;
2931
}
2932

    
2933
uint64_t helper_float_recip1_d(uint64_t fdt0)
2934
{
2935
    uint64_t fdt2;
2936

    
2937
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2938
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2939
    update_fcr31();
2940
    return fdt2;
2941
}
2942

    
2943
uint32_t helper_float_recip1_s(uint32_t fst0)
2944
{
2945
    uint32_t fst2;
2946

    
2947
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2948
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2949
    update_fcr31();
2950
    return fst2;
2951
}
2952

    
2953
uint64_t helper_float_recip1_ps(uint64_t fdt0)
2954
{
2955
    uint32_t fst2;
2956
    uint32_t fsth2;
2957

    
2958
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2959
    fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2960
    fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
2961
    update_fcr31();
2962
    return ((uint64_t)fsth2 << 32) | fst2;
2963
}
2964

    
2965
uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
2966
{
2967
    uint64_t fdt2;
2968

    
2969
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2970
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2971
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2972
    update_fcr31();
2973
    return fdt2;
2974
}
2975

    
2976
uint32_t helper_float_rsqrt1_s(uint32_t fst0)
2977
{
2978
    uint32_t fst2;
2979

    
2980
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2981
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2982
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2983
    update_fcr31();
2984
    return fst2;
2985
}
2986

    
2987
uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
2988
{
2989
    uint32_t fst2;
2990
    uint32_t fsth2;
2991

    
2992
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2993
    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2994
    fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
2995
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2996
    fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
2997
    update_fcr31();
2998
    return ((uint64_t)fsth2 << 32) | fst2;
2999
}
3000

    
3001
/* Shorthand for building FP helper prototypes.
   NOTE(review): no expansion of this macro is visible in this chunk --
   possibly vestigial; confirm before removing.  */
#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
3002

    
3003
/* binary operations */
3004
#define FLOAT_BINOP(name)                                          \
3005
uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)     \
3006
{                                                                  \
3007
    uint64_t dt2;                                                  \
3008
                                                                   \
3009
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
3010
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);     \
3011
    update_fcr31();                                                \
3012
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
3013
        dt2 = FLOAT_QNAN64;                                        \
3014
    return dt2;                                                    \
3015
}                                                                  \
3016
                                                                   \
3017
uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)     \
3018
{                                                                  \
3019
    uint32_t wt2;                                                  \
3020
                                                                   \
3021
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
3022
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
3023
    update_fcr31();                                                \
3024
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
3025
        wt2 = FLOAT_QNAN32;                                        \
3026
    return wt2;                                                    \
3027
}                                                                  \
3028
                                                                   \
3029
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)    \
3030
{                                                                  \
3031
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
3032
    uint32_t fsth0 = fdt0 >> 32;                                   \
3033
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
3034
    uint32_t fsth1 = fdt1 >> 32;                                   \
3035
    uint32_t wt2;                                                  \
3036
    uint32_t wth2;                                                 \
3037
                                                                   \
3038
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
3039
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
3040
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);  \
3041
    update_fcr31();                                                \
3042
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {              \
3043
        wt2 = FLOAT_QNAN32;                                        \
3044
        wth2 = FLOAT_QNAN32;                                       \
3045
    }                                                              \
3046
    return ((uint64_t)wth2 << 32) | wt2;                           \
3047
}
3048

    
3049
FLOAT_BINOP(add)
3050
FLOAT_BINOP(sub)
3051
FLOAT_BINOP(mul)
3052
FLOAT_BINOP(div)
3053
#undef FLOAT_BINOP
3054

    
3055
/* ternary operations */
3056
#define FLOAT_TERNOP(name1, name2)                                        \
3057
uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
3058
                                           uint64_t fdt2)                 \
3059
{                                                                         \
3060
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
3061
    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
3062
}                                                                         \
3063
                                                                          \
3064
uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
3065
                                           uint32_t fst2)                 \
3066
{                                                                         \
3067
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
3068
    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
3069
}                                                                         \
3070
                                                                          \
3071
uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
3072
                                            uint64_t fdt2)                \
3073
{                                                                         \
3074
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
3075
    uint32_t fsth0 = fdt0 >> 32;                                          \
3076
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
3077
    uint32_t fsth1 = fdt1 >> 32;                                          \
3078
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
3079
    uint32_t fsth2 = fdt2 >> 32;                                          \
3080
                                                                          \
3081
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
3082
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
3083
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
3084
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
3085
    return ((uint64_t)fsth2 << 32) | fst2;                                \
3086
}
3087

    
3088
FLOAT_TERNOP(mul, add)
3089
FLOAT_TERNOP(mul, sub)
3090
#undef FLOAT_TERNOP
3091

    
3092
/* negated ternary operations */
3093
#define FLOAT_NTERNOP(name1, name2)                                       \
3094
uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
3095
                                           uint64_t fdt2)                 \
3096
{                                                                         \
3097
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
3098
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
3099
    return float64_chs(fdt2);                                             \
3100
}                                                                         \
3101
                                                                          \
3102
uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
3103
                                           uint32_t fst2)                 \
3104
{                                                                         \
3105
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
3106
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
3107
    return float32_chs(fst2);                                             \
3108
}                                                                         \
3109
                                                                          \
3110
uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
3111
                                           uint64_t fdt2)                 \
3112
{                                                                         \
3113
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
3114
    uint32_t fsth0 = fdt0 >> 32;                                          \
3115
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
3116
    uint32_t fsth1 = fdt1 >> 32;                                          \
3117
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
3118
    uint32_t fsth2 = fdt2 >> 32;                                          \
3119
                                                                          \
3120
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
3121
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
3122
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
3123
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
3124
    fst2 = float32_chs(fst2);                                             \
3125
    fsth2 = float32_chs(fsth2);                                           \
3126
    return ((uint64_t)fsth2 << 32) | fst2;                                \
3127
}
3128

    
3129
FLOAT_NTERNOP(mul, add)
3130
FLOAT_NTERNOP(mul, sub)
3131
#undef FLOAT_NTERNOP
3132

    
3133
/* MIPS specific binary operations */
3134
uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
3135
{
3136
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3137
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3138
    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
3139
    update_fcr31();
3140
    return fdt2;
3141
}
3142

    
3143
uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
3144
{
3145
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3146
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3147
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
3148
    update_fcr31();
3149
    return fst2;
3150
}
3151

    
3152
uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
3153
{
3154
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3155
    uint32_t fsth0 = fdt0 >> 32;
3156
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3157
    uint32_t fsth2 = fdt2 >> 32;
3158

    
3159
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3160
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3161
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3162
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
3163
    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
3164
    update_fcr31();
3165
    return ((uint64_t)fsth2 << 32) | fst2;
3166
}
3167

    
3168
uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
3169
{
3170
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3171
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3172
    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
3173
    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
3174
    update_fcr31();
3175
    return fdt2;
3176
}
3177

    
3178
uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
3179
{
3180
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3181
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3182
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
3183
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3184
    update_fcr31();
3185
    return fst2;
3186
}
3187

    
3188
uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
3189
{
3190
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3191
    uint32_t fsth0 = fdt0 >> 32;
3192
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3193
    uint32_t fsth2 = fdt2 >> 32;
3194

    
3195
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3196
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3197
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3198
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
3199
    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
3200
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3201
    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
3202
    update_fcr31();
3203
    return ((uint64_t)fsth2 << 32) | fst2;
3204
}
3205

    
3206
uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
3207
{
3208
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3209
    uint32_t fsth0 = fdt0 >> 32;
3210
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3211
    uint32_t fsth1 = fdt1 >> 32;
3212
    uint32_t fst2;
3213
    uint32_t fsth2;
3214

    
3215
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3216
    fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
3217
    fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
3218
    update_fcr31();
3219
    return ((uint64_t)fsth2 << 32) | fst2;
3220
}
3221

    
3222
uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
3223
{
3224
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3225
    uint32_t fsth0 = fdt0 >> 32;
3226
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3227
    uint32_t fsth1 = fdt1 >> 32;
3228
    uint32_t fst2;
3229
    uint32_t fsth2;
3230

    
3231
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3232
    fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
3233
    fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
3234
    update_fcr31();
3235
    return ((uint64_t)fsth2 << 32) | fst2;
3236
}
3237

    
3238
/* compare operations */

/* C.cond.D: evaluate "cond" on fdt0/fdt1 and set or clear FP condition
   code "cc".  The cmpabs variant compares absolute values (used by the
   CABS pseudo-ops).  IEEE flags are cleared first and folded into FCR31
   after the comparison.  */
#define FOP_COND_D(op, cond)                                        \
void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)     \
{                                                                   \
    int result;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);       \
    result = cond;                                                  \
    update_fcr31();                                                 \
    if (result) {                                                   \
        SET_FP_COND(cc, env->active_fpu);                           \
    } else {                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                         \
    }                                                               \
}                                                                   \
void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)  \
{                                                                   \
    int result;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);       \
    fdt0 = float64_abs(fdt0);                                       \
    fdt1 = float64_abs(fdt1);                                       \
    result = cond;                                                  \
    update_fcr31();                                                 \
    if (result) {                                                   \
        SET_FP_COND(cc, env->active_fpu);                           \
    } else {                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                         \
    }                                                               \
}
3264

    
3265
/* NOTE: the comma operator will make "cond" to eval to false,
3266
 * but float64_unordered_quiet() is still called. */
3267
FOP_COND_D(f,   (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
3268
FOP_COND_D(un,  float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
3269
FOP_COND_D(eq,  float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3270
FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3271
FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3272
FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3273
FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3274
FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3275
/* NOTE: the comma operator will make "cond" to eval to false,
3276
 * but float64_unordered() is still called. */
3277
FOP_COND_D(sf,  (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
3278
FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
3279
FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3280
FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3281
FOP_COND_D(lt,  float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3282
FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3283
FOP_COND_D(le,  float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3284
FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3285

    
3286
/* C.cond.S: single-precision analogue of FOP_COND_D -- evaluate "cond"
   on fst0/fst1 and set or clear FP condition code "cc"; the cmpabs
   variant compares absolute values.  */
#define FOP_COND_S(op, cond)                                        \
void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)     \
{                                                                   \
    int result;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);       \
    result = cond;                                                  \
    update_fcr31();                                                 \
    if (result) {                                                   \
        SET_FP_COND(cc, env->active_fpu);                           \
    } else {                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                         \
    }                                                               \
}                                                                   \
void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)  \
{                                                                   \
    int result;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);       \
    fst0 = float32_abs(fst0);                                       \
    fst1 = float32_abs(fst1);                                       \
    result = cond;                                                  \
    update_fcr31();                                                 \
    if (result) {                                                   \
        SET_FP_COND(cc, env->active_fpu);                           \
    } else {                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                         \
    }                                                               \
}
3311

    
3312
/* NOTE: the comma operator will make "cond" to eval to false,
3313
 * but float32_unordered_quiet() is still called. */
3314
FOP_COND_S(f,   (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
3315
FOP_COND_S(un,  float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
3316
FOP_COND_S(eq,  float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
3317
FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)  || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
3318
FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
3319
FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)  || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
3320
FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
3321
FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)  || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
3322
/* NOTE: the comma operator will make "cond" to eval to false,
3323
 * but float32_unordered() is still called. */
3324
FOP_COND_S(sf,  (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
3325
FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
3326
FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3327
FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3328
FOP_COND_S(lt,  float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3329
FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3330
FOP_COND_S(le,  float32_le(fst0, fst1, &env->active_fpu.fp_status))
3331
FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
3332

    
3333
/* Define paired-single (PS) compare helpers.  Each expansion emits
 * helper_cmp_ps_<op> and helper_cmpabs_ps_<op>:
 *   - the two 64-bit operands are split into low (bits 31:0) and high
 *     (bits 63:32) float32 halves;
 *   - 'condl' is evaluated on the low halves (fst0/fst1) and 'condh' on
 *     the high halves (fsth0/fsth1);
 *   - FCSR condition code 'cc' is set/cleared from the low-half result
 *     and 'cc + 1' from the high-half result.
 * The "abs" variant compares absolute values (sign bit stripped via
 * float32_abs).  Note it does not reset the accrued exception flags
 * before comparing, unlike helper_cmp_ps_<op>. */
#define FOP_COND_PS(op, condl, condh)                           \
void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                               \
    uint32_t fst0, fsth0, fst1, fsth1;                          \
    int ch, cl;                                                 \
    set_float_exception_flags(0, &env->active_fpu.fp_status);   \
    fst0 = fdt0 & 0XFFFFFFFF;                                   \
    fsth0 = fdt0 >> 32;                                         \
    fst1 = fdt1 & 0XFFFFFFFF;                                   \
    fsth1 = fdt1 >> 32;                                         \
    cl = condl;                                                 \
    ch = condh;                                                 \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}                                                               \
void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                               \
    uint32_t fst0, fsth0, fst1, fsth1;                          \
    int ch, cl;                                                 \
    fst0 = float32_abs(fdt0 & 0XFFFFFFFF);                      \
    fsth0 = float32_abs(fdt0 >> 32);                            \
    fst1 = float32_abs(fdt1 & 0XFFFFFFFF);                      \
    fsth1 = float32_abs(fdt1 >> 32);                            \
    cl = condl;                                                 \
    ch = condh;                                                 \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}

3376
/* NOTE: the comma operator will make "cond" to eval to false,
3377
 * but float32_unordered_quiet() is still called. */
3378
FOP_COND_PS(f,   (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
3379
                 (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3380
FOP_COND_PS(un,  float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
3381
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
3382
FOP_COND_PS(eq,  float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3383
                 float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3384
FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)    || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3385
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3386
FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3387
                 float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3388
FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)    || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3389
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3390
FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3391
                 float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3392
FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)    || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3393
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3394
/* NOTE: the comma operator will make "cond" to eval to false,
3395
 * but float32_unordered() is still called. */
3396
FOP_COND_PS(sf,  (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
3397
                 (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3398
FOP_COND_PS(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
3399
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
3400
FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3401
                 float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3402
FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3403
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3404
FOP_COND_PS(lt,  float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3405
                 float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3406
FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3407
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3408
FOP_COND_PS(le,  float32_le(fst0, fst1, &env->active_fpu.fp_status),
3409
                 float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3410
FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3411
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))