Statistics
| Branch: | Revision:

root / target-mips / op_helper.c @ fe8dca8c

History | View | Annotate | Download (100.3 kB)

1
/*
2
 *  MIPS emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2004-2005 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <stdlib.h>
20
#include "cpu.h"
21
#include "dyngen-exec.h"
22

    
23
#include "host-utils.h"
24

    
25
#include "helper.h"
26

    
27
#if !defined(CONFIG_USER_ONLY)
28
#include "softmmu_exec.h"
29
#endif /* !defined(CONFIG_USER_ONLY) */
30

    
31
#ifndef CONFIG_USER_ONLY
32
static inline void cpu_mips_tlb_flush (CPUState *env, int flush_global);
33
#endif
34

    
35
/* Recompute the lazily-cached hflags from CP0_Status, the debug-mode
   flag and the CPU's ISA level.  Must be called after any change to
   CP0_Status (or TCStatus fields mirrored into it).  Note the order
   matters: MIPS_HFLAG_KSU is derived first and then consulted by the
   64-bit-mode and CP0-access checks below.  */
static inline void compute_hflags(CPUState *env)
{
    /* Clear every flag we are about to re-derive.  */
    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                     MIPS_HFLAG_UX);
    /* Outside of exception/error level and debug mode, the effective
       privilege comes from Status.KSU; otherwise it stays kernel (0).  */
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
    }
#if defined(TARGET_MIPS64)
    /* 64-bit operations are enabled in kernel/supervisor mode, or in
       user mode when Status.PX or Status.UX is set.  */
    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
        (env->CP0_Status & (1 << CP0St_PX)) ||
        (env->CP0_Status & (1 << CP0St_UX))) {
        env->hflags |= MIPS_HFLAG_64;
    }
    /* Status.UX additionally enables 64-bit user addressing.  */
    if (env->CP0_Status & (1 << CP0St_UX)) {
        env->hflags |= MIPS_HFLAG_UX;
    }
#endif
    /* CP0 is accessible with Status.CU0 set or in kernel mode.  */
    if ((env->CP0_Status & (1 << CP0St_CU0)) ||
        !(env->hflags & MIPS_HFLAG_KSU)) {
        env->hflags |= MIPS_HFLAG_CP0;
    }
    if (env->CP0_Status & (1 << CP0St_CU1)) {
        env->hflags |= MIPS_HFLAG_FPU;
    }
    if (env->CP0_Status & (1 << CP0St_FR)) {
        env->hflags |= MIPS_HFLAG_F64;
    }
    /* COP1X (indexed/fused FP ops) availability depends on ISA level.  */
    if (env->insn_flags & ISA_MIPS32R2) {
        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS32) {
        if (env->hflags & MIPS_HFLAG_64) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS4) {
        /* All supported MIPS IV CPUs use the XX (CU3) to enable
           and disable the MIPS IV extensions to the MIPS III ISA.
           Some other MIPS IV CPUs ignore the bit, so the check here
           would be too restrictive for them.  */
        if (env->CP0_Status & (1 << CP0St_CU3)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    }
}
83

    
84
/*****************************************************************************/
85
/* Exceptions processing helpers */
86

    
87
/* Raise a guest exception with an error code and longjmp back to the
   main execution loop via cpu_loop_exit(); this function never returns
   to the caller.  */
void helper_raise_exception_err (uint32_t exception, int error_code)
{
#if 1
    /* Debug tracing of "real" exceptions (internal ones use codes
       >= 0x100 and are not logged).  */
    if (exception < 0x100)
        qemu_log("%s: %d %d\n", __func__, exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit(env);
}
97

    
98
/* Raise a guest exception with no error code.  Does not return.  */
void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
102

    
103
#if !defined(CONFIG_USER_ONLY)
104
static void do_restore_state (void *pc_ptr)
105
{
106
    TranslationBlock *tb;
107
    unsigned long pc = (unsigned long) pc_ptr;
108
    
109
    tb = tb_find_pc (pc);
110
    if (tb) {
111
        cpu_restore_state(tb, env, pc);
112
    }
113
}
114
#endif
115

    
116
#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: a single flat address space, so every access
   goes through the *_raw accessors regardless of mem_idx.  */
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(target_ulong addr, int mem_idx)            \
{                                                                       \
    return (type) insn##_raw(addr);                                     \
}
#else
/* System emulation: dispatch on the MMU index (0 = kernel, 1 =
   supervisor, 2/default = user) so the access is made with the
   correct privilege.  */
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(target_ulong addr, int mem_idx)            \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: return (type) insn##_kernel(addr); break;                   \
    case 1: return (type) insn##_super(addr); break;                    \
    default:                                                            \
    case 2: return (type) insn##_user(addr); break;                     \
    }                                                                   \
}
#endif
HELPER_LD(lbu, ldub, uint8_t)
HELPER_LD(lw, ldl, int32_t)
#ifdef TARGET_MIPS64
HELPER_LD(ld, ldq, int64_t)
#endif
#undef HELPER_LD
141

    
142
#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: single flat address space, mem_idx ignored.  */
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
{                                                                       \
    insn##_raw(addr, val);                                              \
}
#else
/* System emulation: store with the privilege selected by the MMU
   index (0 = kernel, 1 = supervisor, 2/default = user).  */
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: insn##_kernel(addr, val); break;                            \
    case 1: insn##_super(addr, val); break;                             \
    default:                                                            \
    case 2: insn##_user(addr, val); break;                              \
    }                                                                   \
}
#endif
HELPER_ST(sb, stb, uint8_t)
HELPER_ST(sw, stl, uint32_t)
#ifdef TARGET_MIPS64
HELPER_ST(sd, stq, uint64_t)
#endif
#undef HELPER_ST
167

    
168
/* CLO: count leading ones in the low 32 bits of arg1.  */
target_ulong helper_clo (target_ulong arg1)
{
    return clo32(arg1);
}

/* CLZ: count leading zeroes in the low 32 bits of arg1.  */
target_ulong helper_clz (target_ulong arg1)
{
    return clz32(arg1);
}
177

    
178
#if defined(TARGET_MIPS64)
179
target_ulong helper_dclo (target_ulong arg1)
180
{
181
    return clo64(arg1);
182
}
183

    
184
target_ulong helper_dclz (target_ulong arg1)
185
{
186
    return clz64(arg1);
187
}
188
#endif /* TARGET_MIPS64 */
189

    
190
/* 64 bits arithmetic for 32 bits hosts */
191
static inline uint64_t get_HILO (void)
192
{
193
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
194
}
195

    
196
/* Split a 64-bit result across LO and HI, sign-extending each 32-bit
   half as the architecture requires.  */
static inline void set_HILO (uint64_t HILO)
{
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
    env->active_tc.LO[0] = (int32_t)HILO;
}
201

    
202
/* Set LO/HI from a 64-bit result and hand the sign-extended HI half
   back to the caller through arg1.  This must be a macro rather than
   a function: the previous inline-function form assigned to a
   by-value parameter, so the caller's variable was never updated and
   the vr54xx multiply helpers returned a stale value.  The HILO
   argument is evaluated exactly once.  */
#define set_HIT0_LO(arg1, HILO)                                         \
    do {                                                                \
        uint64_t hilo_ = (HILO);                                        \
        env->active_tc.LO[0] = (int32_t)(hilo_ & 0xFFFFFFFF);           \
        (arg1) = env->active_tc.HI[0] = (int32_t)(hilo_ >> 32);         \
    } while (0)
207

    
208
/* Set LO/HI from a 64-bit result and hand the sign-extended LO half
   back to the caller through arg1.  A macro for the same reason as
   set_HIT0_LO: an inline function cannot update the caller's variable
   through a by-value parameter.  The HILO argument is evaluated
   exactly once.  */
#define set_HI_LOT0(arg1, HILO)                                         \
    do {                                                                \
        uint64_t hilo_ = (HILO);                                        \
        (arg1) = env->active_tc.LO[0] = (int32_t)(hilo_ & 0xFFFFFFFF);  \
        env->active_tc.HI[0] = (int32_t)(hilo_ >> 32);                  \
    } while (0)
213

    
214
/* Multiplication variants of the vr54xx. */
215
target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
216
{
217
    set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
218

    
219
    return arg1;
220
}
221

    
222
target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
223
{
224
    set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
225

    
226
    return arg1;
227
}
228

    
229
target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
230
{
231
    set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
232

    
233
    return arg1;
234
}
235

    
236
target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
237
{
238
    set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
239

    
240
    return arg1;
241
}
242

    
243
target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
244
{
245
    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
246

    
247
    return arg1;
248
}
249

    
250
target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
251
{
252
    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
253

    
254
    return arg1;
255
}
256

    
257
target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
258
{
259
    set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
260

    
261
    return arg1;
262
}
263

    
264
target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
265
{
266
    set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
267

    
268
    return arg1;
269
}
270

    
271
target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
272
{
273
    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
274

    
275
    return arg1;
276
}
277

    
278
target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
279
{
280
    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
281

    
282
    return arg1;
283
}
284

    
285
target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
286
{
287
    set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
288

    
289
    return arg1;
290
}
291

    
292
target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
293
{
294
    set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
295

    
296
    return arg1;
297
}
298

    
299
target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
300
{
301
    set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
302

    
303
    return arg1;
304
}
305

    
306
target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
307
{
308
    set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
309

    
310
    return arg1;
311
}
312

    
313
#ifdef TARGET_MIPS64
314
void helper_dmult (target_ulong arg1, target_ulong arg2)
315
{
316
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
317
}
318

    
319
void helper_dmultu (target_ulong arg1, target_ulong arg2)
320
{
321
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
322
}
323
#endif
324

    
325
#ifndef CONFIG_USER_ONLY
326

    
327
/* Translate a virtual address to a physical one for LL/SC tracking.
   On translation failure cpu_mips_translate_address() has already set
   up the exception state, so we just longjmp out; this call does not
   return in that case.  */
static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
{
    target_phys_addr_t lladdr = cpu_mips_translate_address(env, address, rw);

    if (lladdr == -1LL) {
        cpu_loop_exit(env);
    }
    return lladdr;
}
339

    
340
/* Load-linked (LL/LLD): remember the physical address and the loaded
   value in env->lladdr / env->llval so a later SC/SCD can detect
   whether the location was modified in between.  */
#define HELPER_LD_ATOMIC(name, insn)                                          \
target_ulong helper_##name(target_ulong arg, int mem_idx)                     \
{                                                                             \
    env->lladdr = do_translate_address(arg, 0);                               \
    env->llval = do_##insn(arg, mem_idx);                                     \
    return env->llval;                                                        \
}
HELPER_LD_ATOMIC(ll, lw)
#ifdef TARGET_MIPS64
HELPER_LD_ATOMIC(lld, ld)
#endif
#undef HELPER_LD_ATOMIC
352

    
353
/* Store-conditional (SC/SCD): raise AdES on a misaligned address
   (helper_raise_exception does not return), then succeed (return 1)
   only if the target still translates to the linked address and the
   value there still equals the one loaded by LL; otherwise return 0
   without storing.  */
#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
{                                                                             \
    target_long tmp;                                                          \
                                                                              \
    if (arg2 & almask) {                                                      \
        env->CP0_BadVAddr = arg2;                                             \
        helper_raise_exception(EXCP_AdES);                                    \
    }                                                                         \
    if (do_translate_address(arg2, 1) == env->lladdr) {                       \
        tmp = do_##ld_insn(arg2, mem_idx);                                    \
        if (tmp == env->llval) {                                              \
            do_##st_insn(arg2, arg1, mem_idx);                                \
            return 1;                                                         \
        }                                                                     \
    }                                                                         \
    return 0;                                                                 \
}
HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
#ifdef TARGET_MIPS64
HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
#endif
#undef HELPER_ST_ATOMIC
376
#endif
377

    
378
/* GET_LMASK: byte-lane index of an address within its aligned word,
   where 0 is the most-significant byte.  On little-endian targets the
   index is flipped (and offsets negated by GET_OFFSET) so the same
   LWL/LWR/SWL/SWR helper code serves both endiannesses.  */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif
385

    
386
/* LWL: merge the high-order bytes of the unaligned word at arg2 into
   register value arg1.  Bytes are accessed one at a time, in order,
   so a fault on any byte is reported with the correct address.  */
target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    target_ulong tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);

    if (GET_LMASK(arg2) <= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) <= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) == 0) {
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00) | tmp;
    }
    /* Sign-extend the 32-bit result for 64-bit targets.  */
    return (int32_t)arg1;
}
409

    
410
/* LWR: merge the low-order bytes of the unaligned word at arg2 into
   register value arg1 (mirror image of LWL, walking backwards from
   the addressed byte).  */
target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    target_ulong tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFF00) | tmp;

    if (GET_LMASK(arg2) >= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) >= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) == 3) {
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
    }
    /* Sign-extend the 32-bit result for 64-bit targets.  */
    return (int32_t)arg1;
}
433

    
434
/* SWL: store the high-order bytes of arg1 to the unaligned word at
   arg2, byte by byte so faults hit the right address.  */
void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK(arg2) <= 2)
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK(arg2) <= 1)
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK(arg2) == 0)
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
}
447

    
448
/* SWR: store the low-order bytes of arg1 to the unaligned word at
   arg2 (mirror image of SWL).  */
void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)arg1, mem_idx);

    if (GET_LMASK(arg2) >= 1)
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK(arg2) >= 2)
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK(arg2) == 3)
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
}
461

    
462
#if defined(TARGET_MIPS64)
463
/* "half" load and stores.  We must do the memory access inline,
464
   or fault handling won't work.  */
465

    
466
/* Byte-lane index within an aligned doubleword, 0 = most-significant
   byte; flipped on little-endian targets (see GET_LMASK above).  */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif
471

    
472
/* LDL: 64-bit analogue of LWL — merge the high-order bytes of the
   unaligned doubleword at arg2 into arg1, one byte at a time so a
   fault is reported at the exact offending address.  */
target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    uint64_t tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);

    if (GET_LMASK64(arg2) <= 6) {
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) <= 5) {
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) <= 4) {
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) <= 3) {
        tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) <= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) <= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    }

    if (GET_LMASK64(arg2) == 0) {
        tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
    }

    return arg1;
}
516

    
517
/* LDR: 64-bit analogue of LWR — merge the low-order bytes of the
   unaligned doubleword at arg2 into arg1, walking backwards from the
   addressed byte.  */
target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    uint64_t tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;

    if (GET_LMASK64(arg2) >= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp  << 8);
    }

    if (GET_LMASK64(arg2) >= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) >= 3) {
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) >= 4) {
        tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) >= 5) {
        tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) >= 6) {
        tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) == 7) {
        tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
        arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
    }

    return arg1;
}
561

    
562
/* SDL: store the high-order bytes of arg1 to the unaligned doubleword
   at arg2, byte by byte.  */
void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);

    if (GET_LMASK64(arg2) <= 6)
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);

    if (GET_LMASK64(arg2) <= 5)
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);

    if (GET_LMASK64(arg2) <= 4)
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);

    if (GET_LMASK64(arg2) <= 3)
        do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK64(arg2) <= 2)
        do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK64(arg2) <= 1)
        do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);

    /* GET_LMASK64() is non-negative, so "<= 0" is effectively "== 0".  */
    if (GET_LMASK64(arg2) <= 0)
        do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
}
587

    
588
/* SDR: store the low-order bytes of arg1 to the unaligned doubleword
   at arg2 (mirror image of SDL).  */
void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)arg1, mem_idx);

    if (GET_LMASK64(arg2) >= 1)
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK64(arg2) >= 2)
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK64(arg2) >= 3)
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK64(arg2) >= 4)
        do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);

    if (GET_LMASK64(arg2) >= 5)
        do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);

    if (GET_LMASK64(arg2) >= 6)
        do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);

    if (GET_LMASK64(arg2) == 7)
        do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
}
613
#endif /* TARGET_MIPS64 */
614

    
615
/* GPR numbers covered by the load/store-multiple helpers below:
   registers 16-23 (s0-s7) and 30 (fp/s8), in encoding order.  */
static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
616

    
617
/* Load multiple: fill the first (reglist & 0xf) registers from
   multiple_regs[] with consecutive words starting at addr, and
   optionally (reglist & 0x10) load ra from the following word.
   Each word is sign-extended into the 64-bit GPR.  */
void helper_lwm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
#define ldfun ldl_raw
#else
    /* Select the accessor matching the privilege of the access.  */
    uint32_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldl_kernel; break;
    case 1: ldfun = ldl_super; break;
    default:
    case 2: ldfun = ldl_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = (target_long) ldfun(addr);
            addr += 4;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = (target_long) ldfun(addr);
    }
}
649

    
650
/* Store multiple: counterpart of helper_lwm — write the selected
   registers from multiple_regs[] (and optionally ra) as consecutive
   words starting at addr.  */
void helper_swm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
#define stfun stl_raw
#else
    /* Select the accessor matching the privilege of the access.  */
    void (*stfun)(target_ulong, uint32_t);

    switch (mem_idx)
    {
    case 0: stfun = stl_kernel; break;
    case 1: stfun = stl_super; break;
     default:
    case 2: stfun = stl_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 4;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}
682

    
683
#if defined(TARGET_MIPS64)
684
/* 64-bit load multiple: like helper_lwm but transfers doublewords
   (stride 8) with no sign-extension step needed.  */
void helper_ldm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
#define ldfun ldq_raw
#else
    /* Select the accessor matching the privilege of the access.  */
    uint64_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldq_kernel; break;
    case 1: ldfun = ldq_super; break;
    default:
    case 2: ldfun = ldq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = ldfun(addr);
            addr += 8;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = ldfun(addr);
    }
}
716

    
717
/* 64-bit store multiple: counterpart of helper_ldm, doubleword
   stores with stride 8.  */
void helper_sdm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
#define stfun stq_raw
#else
    /* Select the accessor matching the privilege of the access.  */
    void (*stfun)(target_ulong, uint64_t);

    switch (mem_idx)
    {
    case 0: stfun = stq_kernel; break;
    case 1: stfun = stq_super; break;
     default:
    case 2: stfun = stq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 8;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}
749
#endif
750

    
751
#ifndef CONFIG_USER_ONLY
752
/* tc should point to an int with the value of the global TC index.
   This function will transform it into a local index within the
   returned CPUState.

   FIXME: This code assumes that all VPEs have the same number of TCs,
          which depends on runtime setup. Can probably be fixed by
          walking the list of CPUStates.  */
static CPUState *mips_cpu_map_tc(int *tc)
{
    CPUState *other;
    int vpe_idx, nr_threads = env->nr_threads;
    int tc_idx = *tc;

    /* Without MVP (master VPE) privileges, accesses are confined to
       the current VPE's own TCs.  */
    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
        /* Not allowed to address other CPUs.  */
        *tc = env->current_tc;
        return env;
    }

    /* Map the flat TC index onto (VPE, local TC) assuming nr_threads
       TCs per VPE; fall back to the current CPU if the VPE index is
       out of range.  */
    vpe_idx = tc_idx / nr_threads;
    *tc = tc_idx % nr_threads;
    other = qemu_get_cpu(vpe_idx);
    return other ? other : env;
}
776

    
777
/* The per VPE CP0_Status register shares some fields with the per TC
778
   CP0_TCStatus registers. These fields are wired to the same registers,
779
   so changes to either of them should be reflected on both registers.
780

781
   Also, EntryHi shares the bottom 8-bit ASID field with TCStatus.

   These helpers synchronize the registers for a given cpu.  */
784

    
785
/* Called for updates to CP0_Status.  Mirrors the shared fields
   (CU0-3, MX, KSU) plus the EntryHi ASID into the given TC's
   TCStatus register, then recomputes the cached hflags.  */
static void sync_c0_status(CPUState *cpu, int tc)
{
    int32_t tcstatus, *tcst;
    uint32_t v = cpu->CP0_Status;
    uint32_t cu, mx, asid, ksu;
    /* TCStatus fields that are wired to Status/EntryHi.  */
    uint32_t mask = ((1 << CP0TCSt_TCU3)
                       | (1 << CP0TCSt_TCU2)
                       | (1 << CP0TCSt_TCU1)
                       | (1 << CP0TCSt_TCU0)
                       | (1 << CP0TCSt_TMX)
                       | (3 << CP0TCSt_TKSU)
                       | (0xff << CP0TCSt_TASID));

    cu = (v >> CP0St_CU0) & 0xf;
    mx = (v >> CP0St_MX) & 0x1;
    ksu = (v >> CP0St_KSU) & 0x3;
    /* NOTE(review): this reads the *current* CPU's EntryHi even though
       every other field comes from the target `cpu` — looks wrong for
       a cross-VPE MTTR sync; confirm against the MT ASE spec before
       changing.  */
    asid = env->CP0_EntryHi & 0xff;

    tcstatus = cu << CP0TCSt_TCU0;
    tcstatus |= mx << CP0TCSt_TMX;
    tcstatus |= ksu << CP0TCSt_TKSU;
    tcstatus |= asid;

    /* Write to the active TC's copy or the backing store, depending
       on whether `tc` is currently scheduled.  */
    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    *tcst &= ~mask;
    *tcst |= tcstatus;
    compute_hflags(cpu);
}
819

    
820
/* Called for updates to CP0_TCStatus.  */
821
static void sync_c0_tcstatus(CPUState *cpu, int tc, target_ulong v)
822
{
823
    uint32_t status;
824
    uint32_t tcu, tmx, tasid, tksu;
825
    uint32_t mask = ((1 << CP0St_CU3)
826
                       | (1 << CP0St_CU2)
827
                       | (1 << CP0St_CU1)
828
                       | (1 << CP0St_CU0)
829
                       | (1 << CP0St_MX)
830
                       | (3 << CP0St_KSU));
831

    
832
    tcu = (v >> CP0TCSt_TCU0) & 0xf;
833
    tmx = (v >> CP0TCSt_TMX) & 0x1;
834
    tasid = v & 0xff;
835
    tksu = (v >> CP0TCSt_TKSU) & 0x3;
836

    
837
    status = tcu << CP0St_CU0;
838
    status |= tmx << CP0St_MX;
839
    status |= tksu << CP0St_KSU;
840

    
841
    cpu->CP0_Status &= ~mask;
842
    cpu->CP0_Status |= status;
843

    
844
    /* Sync the TASID with EntryHi.  */
845
    cpu->CP0_EntryHi &= ~0xff;
846
    cpu->CP0_EntryHi = tasid;
847

    
848
    compute_hflags(cpu);
849
}
850

    
851
/* Called for updates to CP0_EntryHi.  */
852
static void sync_c0_entryhi(CPUState *cpu, int tc)
853
{
854
    int32_t *tcst;
855
    uint32_t asid, v = cpu->CP0_EntryHi;
856

    
857
    asid = v & 0xff;
858

    
859
    if (tc == cpu->current_tc) {
860
        tcst = &cpu->active_tc.CP0_TCStatus;
861
    } else {
862
        tcst = &cpu->tcs[tc].CP0_TCStatus;
863
    }
864

    
865
    *tcst &= ~0xff;
866
    *tcst |= asid;
867
}
868

    
869
/* CP0 helpers */

/* Read the MVP-shared MVPControl register.  */
target_ulong helper_mfc0_mvpcontrol (void)
{
    return env->mvp->CP0_MVPControl;
}

/* Read the MVP-shared MVPConf0 register.  */
target_ulong helper_mfc0_mvpconf0 (void)
{
    return env->mvp->CP0_MVPConf0;
}

/* Read the MVP-shared MVPConf1 register.  */
target_ulong helper_mfc0_mvpconf1 (void)
{
    return env->mvp->CP0_MVPConf1;
}

/* Read CP0_Random; sign-extend for the 32-bit mfc0 view.  */
target_ulong helper_mfc0_random (void)
{
    return (int32_t)cpu_mips_get_random(env);
}

/* Read TCStatus of the currently running thread context.  */
target_ulong helper_mfc0_tcstatus (void)
{
    return env->active_tc.CP0_TCStatus;
}

/* Read TCStatus of the TC named by VPEControl.TargTC (possibly on
   another VPE; mips_cpu_map_tc remaps the index and CPU).  */
target_ulong helper_mftc0_tcstatus(void)
{
    /* TargTC occupies the low byte of VPEControl, so no shift is needed.  */
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCStatus;
    else
        return other->tcs[other_tc].CP0_TCStatus;
}

/* Read TCBind of the current thread context.  */
target_ulong helper_mfc0_tcbind (void)
{
    return env->active_tc.CP0_TCBind;
}

/* Read TCBind of the targeted TC (see helper_mftc0_tcstatus).  */
target_ulong helper_mftc0_tcbind(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCBind;
    else
        return other->tcs[other_tc].CP0_TCBind;
}

/* TCRestart of the running TC is simply its PC.  */
target_ulong helper_mfc0_tcrestart (void)
{
    return env->active_tc.PC;
}
926

    
927
/* Read TCRestart (the saved PC) of the TC named by VPEControl.TargTC.  */
target_ulong helper_mftc0_tcrestart(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.PC;
    else
        return other->tcs[other_tc].PC;
}

/* Read TCHalt of the current thread context.  */
target_ulong helper_mfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

/* Read TCHalt of the targeted TC.  */
target_ulong helper_mftc0_tchalt(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCHalt;
    else
        return other->tcs[other_tc].CP0_TCHalt;
}

/* Read TCContext of the current thread context.  */
target_ulong helper_mfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

/* Read TCContext of the targeted TC.  */
target_ulong helper_mftc0_tccontext(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCContext;
    else
        return other->tcs[other_tc].CP0_TCContext;
}

/* Read TCSchedule of the current thread context.  */
target_ulong helper_mfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

/* Read TCSchedule of the targeted TC.  */
target_ulong helper_mftc0_tcschedule(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCSchedule;
    else
        return other->tcs[other_tc].CP0_TCSchedule;
}
985

    
986
/* Read TCScheFBack of the current thread context.  */
target_ulong helper_mfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

/* Read TCScheFBack of the TC named by VPEControl.TargTC.  */
target_ulong helper_mftc0_tcschefback(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCScheFBack;
    else
        return other->tcs[other_tc].CP0_TCScheFBack;
}

/* Read CP0_Count; sign-extend for the 32-bit view.  */
target_ulong helper_mfc0_count (void)
{
    return (int32_t)cpu_mips_get_count(env);
}

/* EntryHi is per-VPE: read it from the VPE owning the targeted TC.  */
target_ulong helper_mftc0_entryhi(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    return other->CP0_EntryHi;
}

/* Status is per-VPE: read it from the VPE owning the targeted TC.  */
target_ulong helper_mftc0_status(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    return other->CP0_Status;
}

/* Read LLAddr, scaled down by the implementation's shift amount.  */
target_ulong helper_mfc0_lladdr (void)
{
    return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
}

/* Read WatchLo[sel]; sign-extend for the 32-bit view.  */
target_ulong helper_mfc0_watchlo (uint32_t sel)
{
    return (int32_t)env->CP0_WatchLo[sel];
}

/* Read WatchHi[sel].  */
target_ulong helper_mfc0_watchhi (uint32_t sel)
{
    return env->CP0_WatchHi[sel];
}

/* Read CP0_Debug, reflecting the live debug-mode flag into DM.  */
target_ulong helper_mfc0_debug (void)
{
    target_ulong t0 = env->CP0_Debug;
    if (env->hflags & MIPS_HFLAG_DM)
        t0 |= 1 << CP0DB_DM;

    return t0;
}
1046

    
1047
/* Read Debug as seen by the targeted TC: VPE-wide bits come from the
   other VPE's CP0_Debug, SSt/Halt come from the TC's own shadow.  */
target_ulong helper_mftc0_debug(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        tcstatus = other->active_tc.CP0_Debug_tcstatus;
    else
        tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;

    /* XXX: Might be wrong, check with EJTAG spec. */
    return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}

#if defined(TARGET_MIPS64)
/* 64-bit (dmfc0) variants: same sources as the 32-bit readers above,
   but without sign-extension/truncation.  */
target_ulong helper_dmfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

target_ulong helper_dmfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_dmfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_dmfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_dmfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

/* Full-width LLAddr read.  */
target_ulong helper_dmfc0_lladdr (void)
{
    return env->lladdr >> env->CP0_LLAddr_shift;
}

/* Full-width WatchLo read.  */
target_ulong helper_dmfc0_watchlo (uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}
#endif /* TARGET_MIPS64 */
1099

    
1100
/* Write CP0_Index: keep the probe-failure bit (bit 31) and store the
   new index masked to the smallest power of two covering the TLB size.  */
void helper_mtc0_index (target_ulong arg1)
{
    int index_bound = 1;
    unsigned int remaining = env->tlb->nb_tlb;

    /* Grow index_bound to the next power of two > nb_tlb / 2.  The
       do/while runs at least once, matching the original rounding.  */
    do {
        remaining >>= 1;
        index_bound <<= 1;
    } while (remaining);

    env->CP0_Index = (env->CP0_Index & 0x80000000)
                     | (arg1 & (index_bound - 1));
}
1111

    
1112
/* Write MVPControl.  Only bits made writable by VPEConf0.MVP and the
   current VPC state may change.  */
void helper_mtc0_mvpcontrol (target_ulong arg1)
{
    uint32_t writable = 0;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        writable |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
                    (1 << CP0MVPCo_EVP);
    }
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        writable |= (1 << CP0MVPCo_STLB);
    }

    // TODO: Enable/disable shared TLB, enable/disable VPEs.
    env->mvp->CP0_MVPControl = (env->mvp->CP0_MVPControl & ~writable)
                               | (arg1 & writable);
}

/* Write VPEControl.  YSI/GSI/TE and the TargTC byte are writable.  */
void helper_mtc0_vpecontrol (target_ulong arg1)
{
    uint32_t writable = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
                        (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);

    /* Yield scheduler intercept not implemented. */
    /* Gating storage scheduler intercept not implemented. */

    // TODO: Enable/disable TCs.
    env->CP0_VPEControl = (env->CP0_VPEControl & ~writable)
                          | (arg1 & writable);
}
1145

    
1146
/* Write VPEConf0.  MVP/VPA (and XTC when VPA is set) are writable only
   when this VPE has master-VP privilege.  */
void helper_mtc0_vpeconf0 (target_ulong arg1)
{
    uint32_t writable = 0;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) {
            writable |= (0xff << CP0VPEC0_XTC);
        }
        writable |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    }

    // TODO: TC exclusive handling due to ERL/EXL.
    env->CP0_VPEConf0 = (env->CP0_VPEConf0 & ~writable)
                        | (arg1 & writable);
}

/* Write VPEConf1.  The NCX/NCP2/NCP1 fields are writable only while the
   MVP is in configuration state (MVPControl.VPC).  */
void helper_mtc0_vpeconf1 (target_ulong arg1)
{
    uint32_t writable = 0;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        writable |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
                    (0xff << CP0VPEC1_NCP1);
    }

    /* UDI not implemented. */
    /* CP2 not implemented. */

    // TODO: Handle FPU (CP1) binding.
    env->CP0_VPEConf1 = (env->CP0_VPEConf1 & ~writable)
                        | (arg1 & writable);
}
1180

    
1181
/* Write YQMask: forced to zero since yield qualifiers are unimplemented.  */
void helper_mtc0_yqmask (target_ulong arg1)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}

/* Write VPEOpt, keeping only its defined low 16 bits.  */
void helper_mtc0_vpeopt (target_ulong arg1)
{
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}

/* Write EntryLo0 (PFN + cache/valid/dirty/global bits).  */
void helper_mtc0_entrylo0 (target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
}
1198

    
1199
/* Write TCStatus of the current TC (through the per-CPU writable-bit
   mask) and mirror the shared fields into Status/EntryHi.  */
void helper_mtc0_tcstatus (target_ulong arg1)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);

    env->active_tc.CP0_TCStatus = newval;
    sync_c0_tcstatus(env, env->current_tc, newval);
}

/* Write TCStatus of the TC named by VPEControl.TargTC.  Note: no
   rw-bitmask filtering here, unlike the mtc0 variant above.  */
void helper_mttc0_tcstatus (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCStatus = arg1;
    else
        other->tcs[other_tc].CP0_TCStatus = arg1;
    sync_c0_tcstatus(other, other_tc, arg1);
}
1221

    
1222
/* Write TCBind of the current TC.  TBE is always writable; CurVPE only
   while the MVP is in configuration state.  */
void helper_mtc0_tcbind (target_ulong arg1)
{
    uint32_t writable = (1 << CP0TCBd_TBE);

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        writable |= (1 << CP0TCBd_CurVPE);
    }
    env->active_tc.CP0_TCBind = (env->active_tc.CP0_TCBind & ~writable)
                                | (arg1 & writable);
}

/* Write TCBind of the TC named by VPEControl.TargTC.  */
void helper_mttc0_tcbind (target_ulong arg1)
{
    int target_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t writable = (1 << CP0TCBd_TBE);
    CPUState *target_cpu = mips_cpu_map_tc(&target_tc);

    if (target_cpu->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        writable |= (1 << CP0TCBd_CurVPE);
    }
    if (target_tc != target_cpu->current_tc) {
        target_cpu->tcs[target_tc].CP0_TCBind =
            (target_cpu->tcs[target_tc].CP0_TCBind & ~writable)
            | (arg1 & writable);
    } else {
        target_cpu->active_tc.CP0_TCBind =
            (target_cpu->active_tc.CP0_TCBind & ~writable)
            | (arg1 & writable);
    }
}
1250

    
1251
/* Write TCRestart of the current TC: redirect its PC, clear the
   delay-slot flag, and drop any pending LL reservation.  */
void helper_mtc0_tcrestart (target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->lladdr = 0ULL;
    /* MIPS16 not implemented. */
}

/* Same as above, but for the TC named by VPEControl.TargTC.  */
void helper_mttc0_tcrestart (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.PC = arg1;
        other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    } else {
        other->tcs[other_tc].PC = arg1;
        other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    }
}
1276

    
1277
/* Write TCHalt of the current TC (only the H bit is kept).  */
void helper_mtc0_tchalt (target_ulong arg1)
{
    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    // TODO: Halt TC / Restart (if allocated+active) TC.
}

/* Write TCHalt of the targeted TC.  Note: unlike the mtc0 variant,
   arg1 is stored unmasked here.  */
void helper_mttc0_tchalt (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    // TODO: Halt TC / Restart (if allocated+active) TC.

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCHalt = arg1;
    else
        other->tcs[other_tc].CP0_TCHalt = arg1;
}
1296

    
1297
/* Write TCContext of the current thread context.  */
void helper_mtc0_tccontext (target_ulong arg1)
{
    env->active_tc.CP0_TCContext = arg1;
}

/* Write TCContext of the TC named by VPEControl.TargTC.  */
void helper_mttc0_tccontext (target_ulong arg1)
{
    int target_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *target_cpu = mips_cpu_map_tc(&target_tc);

    if (target_tc != target_cpu->current_tc) {
        target_cpu->tcs[target_tc].CP0_TCContext = arg1;
    } else {
        target_cpu->active_tc.CP0_TCContext = arg1;
    }
}

/* Write TCSchedule of the current thread context.  */
void helper_mtc0_tcschedule (target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}

/* Write TCSchedule of the targeted TC.  */
void helper_mttc0_tcschedule (target_ulong arg1)
{
    int target_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *target_cpu = mips_cpu_map_tc(&target_tc);

    if (target_tc != target_cpu->current_tc) {
        target_cpu->tcs[target_tc].CP0_TCSchedule = arg1;
    } else {
        target_cpu->active_tc.CP0_TCSchedule = arg1;
    }
}

/* Write TCScheFBack of the current thread context.  */
void helper_mtc0_tcschefback (target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}

/* Write TCScheFBack of the targeted TC.  */
void helper_mttc0_tcschefback (target_ulong arg1)
{
    int target_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *target_cpu = mips_cpu_map_tc(&target_tc);

    if (target_tc != target_cpu->current_tc) {
        target_cpu->tcs[target_tc].CP0_TCScheFBack = arg1;
    } else {
        target_cpu->active_tc.CP0_TCScheFBack = arg1;
    }
}
1344

    
1345
/* Write EntryLo1 (PFN + cache/valid/dirty/global bits).  */
void helper_mtc0_entrylo1 (target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
}

/* Write Context: the BadVPN2 field (low 23 bits) is read-only to
   software, only PTEBase changes.  */
void helper_mtc0_context (target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}

/* Write PageMask, restricted to page sizes the softmmu supports.  */
void helper_mtc0_pagemask (target_ulong arg1)
{
    /* 1k pages not implemented */
    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
}

/* PageGrain is pinned to zero: none of its features are implemented.  */
void helper_mtc0_pagegrain (target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = 0;
}

/* Write Wired, wrapped into the valid TLB index range.  */
void helper_mtc0_wired (target_ulong arg1)
{
    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
}

/* SRSConf0-4: shadow-set configuration; writable bits can only be
   set (|=), matching their write-one semantics here.  */
void helper_mtc0_srsconf0 (target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}

void helper_mtc0_srsconf1 (target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}

void helper_mtc0_srsconf2 (target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}

void helper_mtc0_srsconf3 (target_ulong arg1)
{
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}

void helper_mtc0_srsconf4 (target_ulong arg1)
{
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}

/* Write HWREna: only the four architected rdhwr-enable bits.  */
void helper_mtc0_hwrena (target_ulong arg1)
{
    env->CP0_HWREna = arg1 & 0x0000000F;
}

/* Write Count via the timer backend so the deadline is recomputed.  */
void helper_mtc0_count (target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}
1410

    
1411
/* Write EntryHi (VPN2 + ASID).  Propagates the ASID to the current
   TC's TCStatus under MT, and flushes qemu's TLB on ASID change.  */
void helper_mtc0_entryhi (target_ulong arg1)
{
    target_ulong old, val;

    /* 1k pages not implemented */
    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
#if defined(TARGET_MIPS64)
    val &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB.  */
    if ((old & 0xFF) != (val & 0xFF))
        cpu_mips_tlb_flush(env, 1);
}
1429

    
1430
/* Write EntryHi on the VPE owning the targeted TC, and push the new
   ASID into that TC's TCStatus.  */
void helper_mttc0_entryhi(target_ulong arg1)
{
    int target_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *target_cpu = mips_cpu_map_tc(&target_tc);

    target_cpu->CP0_EntryHi = arg1;
    sync_c0_entryhi(target_cpu, target_tc);
}
1438

    
1439
/* Write Compare via the timer backend (re-arms the timer interrupt).  */
void helper_mtc0_compare (target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}

/* Write Status through the per-CPU writable-bit mask, resync MT state
   or recompute hflags, and optionally trace the transition.  */
void helper_mtc0_status (target_ulong arg1)
{
    uint32_t val, old;
    uint32_t mask = env->CP0_Status_rw_bitmask;

    val = arg1 & mask;
    old = env->CP0_Status;
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_status(env, env->current_tc);
    } else {
        compute_hflags(env);
    }

    /* Debug trace of the Status change and resulting MMU mode.  */
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
                env->CP0_Cause);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}
1471

    
1472
/* Write Status on the VPE owning the targeted TC.  The constant masks
   off bits that are not writable cross-TC here.  */
void helper_mttc0_status(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    other->CP0_Status = arg1 & ~0xf1000018;
    sync_c0_status(other, other_tc);
}

/* Write IntCtl: only the implemented IPTI/IPPCI-adjacent bits.  */
void helper_mtc0_intctl (target_ulong arg1)
{
    /* vectored interrupts not implemented, no performance counters. */
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (arg1 & 0x000002e0);
}

/* Write SRSCtl: only the ESS and PSS fields are writable.  */
void helper_mtc0_srsctl (target_ulong arg1)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}
1492

    
1493
/* Write Cause: IV/WP and the software-interrupt bits (plus DC on R2).
   Toggling DC starts/stops the Count timer; toggling IP0/IP1 raises or
   clears the corresponding soft irq.  */
void helper_mtc0_cause (target_ulong arg1)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = env->CP0_Cause;
    int i;

    if (env->insn_flags & ISA_MIPS32R2)
        mask |= 1 << CP0Ca_DC;

    env->CP0_Cause = (env->CP0_Cause & ~mask) | (arg1 & mask);

    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (env->CP0_Cause & (1 << CP0Ca_DC))
            cpu_mips_stop_count(env);
        else
            cpu_mips_start_count(env);
    }

    /* Set/reset software interrupts */
    for (i = 0 ; i < 2 ; i++) {
        if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}

/* Write EBase: only the exception-base field changes.  */
void helper_mtc0_ebase (target_ulong arg1)
{
    /* vectored interrupts not implemented */
    env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
}
1524

    
1525
/* Write Config0: only the K0 cache-coherency field (low 3 bits).  */
void helper_mtc0_config0 (target_ulong arg1)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
}

/* Write Config2.  arg1 is deliberately ignored: no field of Config2 is
   writable while L2/L3 caches are unimplemented.  */
void helper_mtc0_config2 (target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}

/* Write LLAddr through the implementation's shift and rw mask.  */
void helper_mtc0_lladdr (target_ulong arg1)
{
    target_long mask = env->CP0_LLAddr_rw_bitmask;
    arg1 = arg1 << env->CP0_LLAddr_shift;
    env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
}
1542

    
1543
/* Write WatchLo[sel]; the I/R/W enable bits (low 3) are dropped since
   watch exceptions are not generated.  */
void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
{
    /* Watch exceptions for instructions, data loads, data stores
       not implemented. */
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}

/* Write WatchHi[sel]; writing 1 to the low 3 (W1C) bits clears them.  */
void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
{
    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}

/* Write XContext: BadVPN2 (low SEGBITS-7 bits) is read-only.  */
void helper_mtc0_xcontext (target_ulong arg1)
{
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
}

/* Write Framemask verbatim.  */
void helper_mtc0_framemask (target_ulong arg1)
{
    env->CP0_Framemask = arg1; /* XXX */
}
1566

    
1567
/* Write Debug through its writable-bit mask and mirror the DM bit into
   the hflags debug-mode flag.  */
void helper_mtc0_debug (target_ulong arg1)
{
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
    if (arg1 & (1 << CP0DB_DM))
        env->hflags |= MIPS_HFLAG_DM;
    else
        env->hflags &= ~MIPS_HFLAG_DM;
}

/* Write Debug on the targeted TC: SSt/Halt go to the TC's shadow,
   the remaining bits to the VPE-wide register.  */
void helper_mttc0_debug(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
    CPUState *other = mips_cpu_map_tc(&other_tc);

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (other_tc == other->current_tc)
        other->active_tc.CP0_Debug_tcstatus = val;
    else
        other->tcs[other_tc].CP0_Debug_tcstatus = val;
    other->CP0_Debug = (other->CP0_Debug &
                     ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
1591

    
1592
/* Write Performance0 control bits (counters themselves unimplemented).  */
void helper_mtc0_performance0 (target_ulong arg1)
{
    env->CP0_Performance0 = arg1 & 0x000007ff;
}

/* Cache-tag/data registers: stored for cache-op round-trips only.  */
void helper_mtc0_taglo (target_ulong arg1)
{
    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
}

void helper_mtc0_datalo (target_ulong arg1)
{
    env->CP0_DataLo = arg1; /* XXX */
}

void helper_mtc0_taghi (target_ulong arg1)
{
    env->CP0_TagHi = arg1; /* XXX */
}

void helper_mtc0_datahi (target_ulong arg1)
{
    env->CP0_DataHi = arg1; /* XXX */
}
1616

    
1617
/* MIPS MT functions */
1618
target_ulong helper_mftgpr(uint32_t sel)
1619
{
1620
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1621
    CPUState *other = mips_cpu_map_tc(&other_tc);
1622

    
1623
    if (other_tc == other->current_tc)
1624
        return other->active_tc.gpr[sel];
1625
    else
1626
        return other->tcs[other_tc].gpr[sel];
1627
}
1628

    
1629
target_ulong helper_mftlo(uint32_t sel)
1630
{
1631
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1632
    CPUState *other = mips_cpu_map_tc(&other_tc);
1633

    
1634
    if (other_tc == other->current_tc)
1635
        return other->active_tc.LO[sel];
1636
    else
1637
        return other->tcs[other_tc].LO[sel];
1638
}
1639

    
1640
target_ulong helper_mfthi(uint32_t sel)
1641
{
1642
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1643
    CPUState *other = mips_cpu_map_tc(&other_tc);
1644

    
1645
    if (other_tc == other->current_tc)
1646
        return other->active_tc.HI[sel];
1647
    else
1648
        return other->tcs[other_tc].HI[sel];
1649
}
1650

    
1651
target_ulong helper_mftacx(uint32_t sel)
1652
{
1653
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1654
    CPUState *other = mips_cpu_map_tc(&other_tc);
1655

    
1656
    if (other_tc == other->current_tc)
1657
        return other->active_tc.ACX[sel];
1658
    else
1659
        return other->tcs[other_tc].ACX[sel];
1660
}
1661

    
1662
target_ulong helper_mftdsp(void)
1663
{
1664
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1665
    CPUState *other = mips_cpu_map_tc(&other_tc);
1666

    
1667
    if (other_tc == other->current_tc)
1668
        return other->active_tc.DSPControl;
1669
    else
1670
        return other->tcs[other_tc].DSPControl;
1671
}
1672

    
1673
/* Write GPR sel of the TC named by VPEControl.TargTC.  */
void helper_mttgpr(target_ulong arg1, uint32_t sel)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *owner = mips_cpu_map_tc(&tc);

    if (tc != owner->current_tc) {
        owner->tcs[tc].gpr[sel] = arg1;
    } else {
        owner->active_tc.gpr[sel] = arg1;
    }
}

/* Write LO[sel] of the targeted TC.  */
void helper_mttlo(target_ulong arg1, uint32_t sel)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *owner = mips_cpu_map_tc(&tc);

    if (tc != owner->current_tc) {
        owner->tcs[tc].LO[sel] = arg1;
    } else {
        owner->active_tc.LO[sel] = arg1;
    }
}

/* Write HI[sel] of the targeted TC.  */
void helper_mtthi(target_ulong arg1, uint32_t sel)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *owner = mips_cpu_map_tc(&tc);

    if (tc != owner->current_tc) {
        owner->tcs[tc].HI[sel] = arg1;
    } else {
        owner->active_tc.HI[sel] = arg1;
    }
}

/* Write ACX[sel] of the targeted TC.  */
void helper_mttacx(target_ulong arg1, uint32_t sel)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *owner = mips_cpu_map_tc(&tc);

    if (tc != owner->current_tc) {
        owner->tcs[tc].ACX[sel] = arg1;
    } else {
        owner->active_tc.ACX[sel] = arg1;
    }
}

/* Write DSPControl of the targeted TC.  */
void helper_mttdsp(target_ulong arg1)
{
    int tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *owner = mips_cpu_map_tc(&tc);

    if (tc != owner->current_tc) {
        owner->tcs[tc].DSPControl = arg1;
    } else {
        owner->active_tc.DSPControl = arg1;
    }
}
1727

    
1728
/* MIPS MT functions */

/* dmt: disable multi-threading.  Stub — always returns 0.  */
target_ulong helper_dmt(void)
{
    // TODO
     return 0;
}

/* emt: enable multi-threading.  Stub — always returns 0.  */
target_ulong helper_emt(void)
{
    // TODO
    return 0;
}

/* dvpe: disable virtual processors.  Stub — always returns 0.  */
target_ulong helper_dvpe(void)
{
    // TODO
    return 0;
}

/* evpe: enable virtual processors.  Stub — always returns 0.  */
target_ulong helper_evpe(void)
{
    // TODO
    return 0;
}
1752
#endif /* !CONFIG_USER_ONLY */
1753

    
1754
/* fork: spawn a new thread context.  Stub — the result is computed but
   never stored back, so this is currently a no-op.  */
void helper_fork(target_ulong arg1, target_ulong arg2)
{
    // arg1 = rt, arg2 = rs
    arg1 = 0;
    // TODO: store to TC register
}
1760

    
1761
/* yield: thread scheduling hint.  Negative arg (other than -2) may raise
   a Thread exception if yield-scheduler intercept is enabled; arg == 0
   would deallocate the TC (unimplemented); positive arg always raises a
   Thread exception since yield qualifiers are unimplemented.
   Returns CP0_YQMask.  */
target_ulong helper_yield(target_ulong arg)
{
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                /* EXCPT = 4: yield-scheduler intercept.  */
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                helper_raise_exception(EXCP_THREAD);
            }
        }
    } else if (arg1 == 0) {
        if (0 /* TODO: TC underflow */) {
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            helper_raise_exception(EXCP_THREAD);
        } else {
            // TODO: Deallocate TC
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        /* EXCPT = 2: yield-qualifier exception.  */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        helper_raise_exception(EXCP_THREAD);
    }
    return env->CP0_YQMask;
}
1790

    
1791
#ifndef CONFIG_USER_ONLY
1792
/* TLB management */
1793
/* Flush qemu's software TLB and forget all shadow entries beyond the
   architected TLB (tlb_in_use is reset to nb_tlb).  */
static void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush (env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

/* Invalidate every shadow TLB entry with index >= first.  */
static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards.  */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}
1807

    
1808
/* Fill TLB entry idx from the CP0 EntryHi/PageMask/EntryLo0/EntryLo1
   registers, decoding the per-page V/D/C/PFN fields.  */
static void r4k_fill_tlb (int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    /* Entry is global only if both halves have the G bit set.  */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}
1830

    
1831
/* tlbwi: write the TLB entry selected by CP0_Index (probe bit masked,
   index wrapped into range).  */
void r4k_helper_tlbwi (void)
{
    int idx;

    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;

    /* Discard cached TLB entries.  We could avoid doing this if the
       tlbwi is just upgrading access permissions on the current entry;
       that might be a further win.  */
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(idx);
}

/* tlbwr: write a random (non-wired) TLB entry.  */
void r4k_helper_tlbwr (void)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(r);
}
1853

    
1854
void r4k_helper_tlbp (void)
1855
{
1856
    r4k_tlb_t *tlb;
1857
    target_ulong mask;
1858
    target_ulong tag;
1859
    target_ulong VPN;
1860
    uint8_t ASID;
1861
    int i;
1862

    
1863
    ASID = env->CP0_EntryHi & 0xFF;
1864
    for (i = 0; i < env->tlb->nb_tlb; i++) {
1865
        tlb = &env->tlb->mmu.r4k.tlb[i];
1866
        /* 1k pages are not supported. */
1867
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1868
        tag = env->CP0_EntryHi & ~mask;
1869
        VPN = tlb->VPN & ~mask;
1870
        /* Check ASID, virtual page number & size */
1871
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1872
            /* TLB match */
1873
            env->CP0_Index = i;
1874
            break;
1875
        }
1876
    }
1877
    if (i == env->tlb->nb_tlb) {
1878
        /* No match.  Discard any shadow entries, if any of them match.  */
1879
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
1880
            tlb = &env->tlb->mmu.r4k.tlb[i];
1881
            /* 1k pages are not supported. */
1882
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1883
            tag = env->CP0_EntryHi & ~mask;
1884
            VPN = tlb->VPN & ~mask;
1885
            /* Check ASID, virtual page number & size */
1886
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1887
                r4k_mips_tlb_flush_extra (env, i);
1888
                break;
1889
            }
1890
        }
1891

    
1892
        env->CP0_Index |= 0x80000000;
1893
    }
1894
}
1895

    
1896
/* TLBR: read the TLB entry selected by CP0_Index back into
   EntryHi/PageMask/EntryLo0/EntryLo1 (inverse of r4k_fill_tlb).  */
void r4k_helper_tlbr (void)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;
    int idx;

    ASID = env->CP0_EntryHi & 0xFF;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    /* If this will change the current ASID, flush qemu's TLB.  */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    /* Drop any shadow entries cached beyond the architected TLB size. */
    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}
1919

    
1920
/* Generic TLB op entry points: dispatch through the per-MMU-model
   function table installed in env->tlb (e.g. the r4k_* handlers above). */
void helper_tlbwi(void)
{
    env->tlb->helper_tlbwi();
}

void helper_tlbwr(void)
{
    env->tlb->helper_tlbwr();
}

void helper_tlbp(void)
{
    env->tlb->helper_tlbp();
}

void helper_tlbr(void)
{
    env->tlb->helper_tlbr();
}
1939

    
1940
/* Specials */
1941
target_ulong helper_di (void)
1942
{
1943
    target_ulong t0 = env->CP0_Status;
1944

    
1945
    env->CP0_Status = t0 & ~(1 << CP0St_IE);
1946
    return t0;
1947
}
1948

    
1949
target_ulong helper_ei (void)
1950
{
1951
    target_ulong t0 = env->CP0_Status;
1952

    
1953
    env->CP0_Status = t0 | (1 << CP0St_IE);
1954
    return t0;
1955
}
1956

    
1957
/* Trace state relevant to an ERET/DERET before it executes
   (only when CPU_LOG_EXEC logging is enabled).  */
static void debug_pre_eret (void)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        qemu_log("\n");
    }
}
1969

    
1970
/* Trace the state after an ERET/DERET, including the resulting MMU
   mode decoded from the hflags KSU field.  Aborts on an impossible
   KSU encoding.  */
static void debug_post_eret (void)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}
1987

    
1988
/* Set the PC from an exception return address.  Bit 0 of the address
   selects the compressed (MIPS16) instruction mode flag and is masked
   off the architectural PC.  */
static void set_pc (target_ulong error_pc)
{
    env->hflags &= ~(MIPS_HFLAG_M16);
    if (error_pc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    }
    env->active_tc.PC = error_pc & ~(target_ulong)1;
}
1997

    
1998
/* ERET: return from exception.  ERL takes priority over EXL: with
   Status.ERL set return to ErrorEPC and clear ERL, otherwise return
   to EPC and clear EXL.  */
void helper_eret (void)
{
    debug_pre_eret();
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        set_pc(env->CP0_ErrorEPC);
        env->CP0_Status &= ~(1 << CP0St_ERL);
    } else {
        set_pc(env->CP0_EPC);
        env->CP0_Status &= ~(1 << CP0St_EXL);
    }
    compute_hflags(env);
    debug_post_eret();
    /* ERET breaks any LL/SC sequence in progress; 1 presumably acts as
       a never-matching link address — confirm against the SC helper.  */
    env->lladdr = 1;
}
2012

    
2013
void helper_deret (void)
2014
{
2015
    debug_pre_eret();
2016
    set_pc(env->CP0_DEPC);
2017

    
2018
    env->hflags &= MIPS_HFLAG_DM;
2019
    compute_hflags(env);
2020
    debug_post_eret();
2021
    env->lladdr = 1;
2022
}
2023
#endif /* !CONFIG_USER_ONLY */
2024

    
2025
/* RDHWR helpers: each hardware register is readable when the CPU has
   CP0 access (kernel/debug) or the matching CP0_HWREna bit grants user
   access; otherwise a Reserved Instruction exception is raised (the
   trailing "return 0" is only reached if the exception helper returns,
   which it does not at runtime).  */

/* HWR 0: CPU number, taken from the low bits of CP0_EBase.  */
target_ulong helper_rdhwr_cpunum(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 0)))
        return env->CP0_EBase & 0x3ff;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

/* HWR 1: address step between SYNCI operations.  */
target_ulong helper_rdhwr_synci_step(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 1)))
        return env->SYNCI_Step;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

/* HWR 2: high-resolution cycle counter (CP0_Count).  */
target_ulong helper_rdhwr_cc(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 2)))
        return env->CP0_Count;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

/* HWR 3: cycle counter resolution.  */
target_ulong helper_rdhwr_ccres(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 3)))
        return env->CCRes;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}
2068

    
2069
/* Minimal emulation of PMON monitor calls.  Arguments/results use the
   MIPS calling convention registers: gpr[4] is a0, gpr[2] is v0 —
   presumably matching the PMON firmware ABI (TODO confirm the exact
   function numbering against a PMON reference).  */
void helper_pmon (int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0)
            env->active_tc.gpr[2] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        /* No input available: always report EOF (-1).  */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        /* outbyte: write the low byte of a0 to stdout.  */
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            /* Print a NUL-terminated string located at guest address a0
               (assumes the address is directly dereferenceable here).  */
            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}
2094

    
2095
/* WAIT: halt the virtual CPU and leave the execution loop via the
   halt exception; execution resumes when an interrupt un-halts it.  */
void helper_wait (void)
{
    env->halted = 1;
    helper_raise_exception(EXCP_HLT);
}
2100

    
2101
#if !defined(CONFIG_USER_ONLY)
2102

    
2103
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
2104

    
2105
#define MMUSUFFIX _mmu
2106
#define ALIGNED_ONLY
2107

    
2108
#define SHIFT 0
2109
#include "softmmu_template.h"
2110

    
2111
#define SHIFT 1
2112
#include "softmmu_template.h"
2113

    
2114
#define SHIFT 2
2115
#include "softmmu_template.h"
2116

    
2117
#define SHIFT 3
2118
#include "softmmu_template.h"
2119

    
2120
/* Softmmu callback for an unaligned access: record the faulting guest
   address in BadVAddr, restore the CPU state from the host return
   address, and raise an address-error exception (AdES for stores,
   AdEL for loads).  */
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (retaddr);
    helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}
2126

    
2127
/* Handle a softmmu TLB miss.  Attempts the guest MMU translation; on
   failure, rewinds the CPU state to the faulting instruction (when
   called from translated code, i.e. retaddr != NULL) and raises the
   guest exception recorded by cpu_mips_handle_mmu_fault.  */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    /* Only reached when the fault was handled (no exception thrown). */
    env = saved_env;
}
2154

    
2155
/* Access to an unassigned physical address: raise an instruction or
   data bus error depending on whether it was a fetch.  */
void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr,
                           int is_write, int is_exec, int unused, int size)
{
    env = env1;

    helper_raise_exception(is_exec ? EXCP_IBE : EXCP_DBE);
}
2165
#endif /* !CONFIG_USER_ONLY */
2166

    
2167
/* Complex FPU operations which may need stack space. */
2168

    
2169
#define FLOAT_ONE32 make_float32(0x3f8 << 20)
2170
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2171
#define FLOAT_TWO32 make_float32(1 << 30)
2172
#define FLOAT_TWO64 make_float64(1ULL << 62)
2173
#define FLOAT_QNAN32 0x7fbfffff
2174
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2175
#define FLOAT_SNAN32 0x7fffffff
2176
#define FLOAT_SNAN64 0x7fffffffffffffffULL
2177

    
2178
/* convert MIPS rounding mode in FCR31 to IEEE library */
/* Indexed by the two-bit RM field (fcr31 & 3) — see
   RESTORE_ROUNDING_MODE below.  */
static unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};
2185

    
2186
#define RESTORE_ROUNDING_MODE \
2187
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2188

    
2189
#define RESTORE_FLUSH_MODE \
2190
    set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
2191

    
2192
/* CFC1: read an FPU control register.  Registers 25/26/28 are the
   architected alternate views onto fcr31 (condition codes, exception
   fields, enables); everything else maps to FIR (reg 0) or the full
   FCSR (default).  */
target_ulong helper_cfc1 (uint32_t reg)
{
    target_ulong arg1;

    switch (reg) {
    case 0:
        /* FIR: implementation/revision register.  */
        arg1 = (int32_t)env->active_fpu.fcr0;
        break;
    case 25:
        /* FCCR: the eight condition-code bits gathered into bits 7..0. */
        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
        break;
    case 26:
        /* FEXR: cause and flag fields only.  */
        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
        break;
    case 28:
        /* FENR: enables, flags, RM — with the FS bit relocated to bit 2. */
        arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
        break;
    default:
        /* FCSR (reg 31) and any other encoding: full control/status word. */
        arg1 = (int32_t)env->active_fpu.fcr31;
        break;
    }

    return arg1;
}
2216

    
2217
/* CTC1: write an FPU control register.  Writes through the alternate
   fcr31 views (25/26/28) or the full FCSR (31); reserved-bit writes
   are silently ignored (early return).  After a successful write the
   softfloat rounding/flush modes are resynchronised and an FP exception
   is raised if an enabled cause bit is now pending.  */
void helper_ctc1 (target_ulong arg1, uint32_t reg)
{
    switch(reg) {
    case 25:
        /* FCCR: scatter bits 7..0 back into the condition-code bits.  */
        if (arg1 & 0xffffff00)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
                     ((arg1 & 0x1) << 23);
        break;
    case 26:
        /* FEXR: update cause and flag fields only.  */
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
        break;
    case 28:
        /* FENR: update enables/flags/RM, relocating bit 2 to FS.  */
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
                     ((arg1 & 0x4) << 22);
        break;
    case 31:
        /* FCSR: full write (reserved bits must be zero).  */
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = arg1;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    /* set flush-to-zero mode */
    RESTORE_FLUSH_MODE;
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    /* Unimplemented-operation (bit 0x20) is always enabled.  */
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
        helper_raise_exception(EXCP_FPE);
}
2253

    
2254
static inline int ieee_ex_to_mips(int xcpt)
2255
{
2256
    int ret = 0;
2257
    if (xcpt) {
2258
        if (xcpt & float_flag_invalid) {
2259
            ret |= FP_INVALID;
2260
        }
2261
        if (xcpt & float_flag_overflow) {
2262
            ret |= FP_OVERFLOW;
2263
        }
2264
        if (xcpt & float_flag_underflow) {
2265
            ret |= FP_UNDERFLOW;
2266
        }
2267
        if (xcpt & float_flag_divbyzero) {
2268
            ret |= FP_DIV0;
2269
        }
2270
        if (xcpt & float_flag_inexact) {
2271
            ret |= FP_INEXACT;
2272
        }
2273
    }
2274
    return ret;
2275
}
2276

    
2277
/* Fold the accumulated softfloat exception flags into fcr31: set the
   cause field, then either raise EXCP_FPE (if an enabled exception
   occurred) or accumulate into the sticky flag bits.  */
static inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));

    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
        helper_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
}
2287

    
2288
/* Float support.
   Single precision routines have a "s" suffix, double precision a
   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
   paired single lower "pl", paired single upper "pu".  */
2292

    
2293
/* unary operations, modifying fp status  */
2294
/* unary operations, modifying fp status  */

/* Square root, double / single precision.  */
uint64_t helper_float_sqrt_d(uint64_t fdt0)
{
    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
}

uint32_t helper_float_sqrt_s(uint32_t fst0)
{
    return float32_sqrt(fst0, &env->active_fpu.fp_status);
}
2303

    
2304
/* CVT.D.fmt: convert single / 32-bit int / 64-bit int to double.
   Each helper clears the softfloat flags first and folds any raised
   exceptions into fcr31 via update_fcr31().  */
uint64_t helper_float_cvtd_s(uint32_t fst0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t helper_float_cvtd_w(uint32_t wt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t helper_float_cvtd_l(uint64_t dt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}
2333

    
2334
/* CVT.L.fmt: convert double / single to 64-bit integer.  On overflow
   or invalid operation the architected default result (here encoded as
   FLOAT_SNAN64, i.e. 2^63-1) is substituted.  */
uint64_t helper_float_cvtl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_cvtl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}
2357

    
2358
/* Paired-single conversions: the 64-bit operand packs two 32-bit
   lanes, low half in bits 31..0 and high half in bits 63..32.  */

/* CVT.PS.PW: two 32-bit ints -> two singles.  */
uint64_t helper_float_cvtps_pw(uint64_t dt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

/* CVT.PW.PS: two singles -> two 32-bit ints; both lanes get the
   default result on overflow/invalid.  */
uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
{
    uint32_t wt2;
    uint32_t wth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        wt2 = FLOAT_SNAN32;
        wth2 = FLOAT_SNAN32;
    }
    return ((uint64_t)wth2 << 32) | wt2;
}
2385

    
2386
/* CVT.S.fmt: convert to single precision from double, 32-bit int,
   64-bit int, or a paired-single lane (pl = lower, pu = upper — the
   lane value itself is already a single, so it is just moved).  */
uint32_t helper_float_cvts_d(uint64_t fdt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t helper_float_cvts_w(uint32_t wt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t helper_float_cvts_l(uint64_t dt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t helper_float_cvts_pl(uint32_t wt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wt0;
    update_fcr31();
    return wt2;
}

uint32_t helper_float_cvts_pu(uint32_t wth0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wth0;
    update_fcr31();
    return wt2;
}
2435

    
2436
/* CVT.W.fmt: convert single / double to 32-bit integer, with the
   default result on overflow/invalid.  */
uint32_t helper_float_cvtw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_cvtw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
2459

    
2460
/* ROUND.{L,W}.{D,S}: float-to-int with round-to-nearest-even forced,
   then the user's rounding mode restored.  Default result substituted
   on overflow/invalid.  */
uint64_t helper_float_roundl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_roundl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_roundw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_roundw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
2515

    
2516
/* TRUNC.{L,W}.{D,S}: float-to-int with truncation toward zero (uses
   the *_round_to_zero softfloat entry points, so no mode switch is
   needed).  Default result substituted on overflow/invalid.  */
uint64_t helper_float_truncl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_truncl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_truncw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_truncw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
2563

    
2564
/* CEIL.{L,W}.{D,S}: float-to-int rounding toward +infinity, then the
   user's rounding mode restored.  Default result on overflow/invalid. */
uint64_t helper_float_ceill_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_ceill_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_ceilw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_ceilw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
2619

    
2620
/* FLOOR.{L,W}.{D,S}: float-to-int rounding toward -infinity, then the
   user's rounding mode restored.  Default result on overflow/invalid. */
uint64_t helper_float_floorl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_floorl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_floorw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_floorw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
2675

    
2676
/* unary operations, not modifying fp status  */
2677
#define FLOAT_UNOP(name)                                       \
2678
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)                \
2679
{                                                              \
2680
    return float64_ ## name(fdt0);                             \
2681
}                                                              \
2682
uint32_t helper_float_ ## name ## _s(uint32_t fst0)                \
2683
{                                                              \
2684
    return float32_ ## name(fst0);                             \
2685
}                                                              \
2686
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)               \
2687
{                                                              \
2688
    uint32_t wt0;                                              \
2689
    uint32_t wth0;                                             \
2690
                                                               \
2691
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                 \
2692
    wth0 = float32_ ## name(fdt0 >> 32);                       \
2693
    return ((uint64_t)wth0 << 32) | wt0;                       \
2694
}
2695
FLOAT_UNOP(abs)
2696
FLOAT_UNOP(chs)
2697
#undef FLOAT_UNOP
2698

    
2699
/* MIPS specific unary operations */
2700
/* MIPS specific unary operations */
/* RECIP.fmt: reciprocal, computed here as a full-precision 1/x divide. */
uint64_t helper_float_recip_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_recip_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}
2719

    
2720
/* RSQRT.fmt: reciprocal square root, computed as 1/sqrt(x).  */
uint64_t helper_float_rsqrt_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_rsqrt_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}
2741

    
2742
/* RECIP1.fmt (MIPS-3D reduced-precision reciprocal step): emulated
   here with a full-precision 1/x divide; the _ps variant operates on
   both 32-bit lanes of the paired-single operand.  */
uint64_t helper_float_recip1_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_recip1_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t helper_float_recip1_ps(uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}
2773

    
2774
/* RSQRT1.fmt (MIPS-3D reduced-precision reciprocal-sqrt step):
   emulated with a full-precision 1/sqrt(x); the _ps variant operates
   on both 32-bit lanes.  */
uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_rsqrt1_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}
2809

    
2810
/* Prototype shorthand for FPU helpers; not expanded anywhere in the
 * visible portion of this file — presumably kept for historical reasons.
 */
#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
2811

    
2812
/* Binary operations.
 *
 * FLOAT_BINOP(name) expands to the _d (double), _s (single) and _ps
 * (paired-single) variants of a two-operand FPU helper.  Each variant:
 *   1. clears the softfloat exception flags,
 *   2. performs the operation via the corresponding softfloat routine,
 *   3. folds the resulting flags into FCR31 with update_fcr31(),
 *   4. substitutes the default quiet NaN when the op raised Invalid
 *      (for _ps, both halves are replaced if either half was invalid).
 * Comments are kept outside the macro so the expansion is unchanged.
 */
#define FLOAT_BINOP(name)                                          \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)     \
{                                                                  \
    uint64_t dt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
        dt2 = FLOAT_QNAN64;                                        \
    return dt2;                                                    \
}                                                                  \
                                                                   \
uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)     \
{                                                                  \
    uint32_t wt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
        wt2 = FLOAT_QNAN32;                                        \
    return wt2;                                                    \
}                                                                  \
                                                                   \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)    \
{                                                                  \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
    uint32_t fsth0 = fdt0 >> 32;                                   \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
    uint32_t fsth1 = fdt1 >> 32;                                   \
    uint32_t wt2;                                                  \
    uint32_t wth2;                                                 \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);  \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {              \
        wt2 = FLOAT_QNAN32;                                        \
        wth2 = FLOAT_QNAN32;                                       \
    }                                                              \
    return ((uint64_t)wth2 << 32) | wt2;                           \
}
2857

    
2858
/* Instantiate the add/sub/mul/div helpers for all three formats. */
FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP
2863

    
2864
/* Ternary operations.
 *
 * FLOAT_TERNOP(name1, name2) expands to _d/_s/_ps helpers that compute
 * name2(name1(op0, op1), op2) — e.g. (mul, add) yields a non-fused
 * multiply-then-add with intermediate rounding after each step.
 *
 * NOTE(review): unlike FLOAT_BINOP above, these helpers neither clear the
 * softfloat exception flags nor call update_fcr31(), so FP exceptions
 * raised here are not reflected in FCR31 — confirm whether this matches
 * the intended MADD/MSUB exception behavior.
 */
#define FLOAT_TERNOP(name1, name2)                                        \
uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
}                                                                         \
                                                                          \
uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
}                                                                         \
                                                                          \
uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
                                            uint64_t fdt2)                \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}
2896

    
2897
/* MADD.fmt and MSUB.fmt helpers (non-fused: round after the multiply). */
FLOAT_TERNOP(mul, add)
FLOAT_TERNOP(mul, sub)
#undef FLOAT_TERNOP
2900

    
2901
/* Negated ternary operations.
 *
 * FLOAT_NTERNOP(name1, name2) expands to _d/_s/_ps helpers computing
 * -(name2(name1(op0, op1), op2)) — e.g. NMADD = -(a*b + c).  The final
 * negation is a raw sign-bit flip (float64_chs/float32_chs), not an
 * arithmetic negate.
 *
 * NOTE(review): like FLOAT_TERNOP, these helpers neither clear the
 * softfloat exception flags nor call update_fcr31() — confirm whether
 * that matches the intended NMADD/NMSUB exception behavior.
 */
#define FLOAT_NTERNOP(name1, name2)                                       \
uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
    return float64_chs(fdt2);                                             \
}                                                                         \
                                                                          \
uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    return float32_chs(fst2);                                             \
}                                                                         \
                                                                          \
uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
                                           uint64_t fdt2)                 \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
    fst2 = float32_chs(fst2);                                             \
    fsth2 = float32_chs(fsth2);                                           \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}
2937

    
2938
/* NMADD.fmt and NMSUB.fmt helpers. */
FLOAT_NTERNOP(mul, add)
FLOAT_NTERNOP(mul, sub)
#undef FLOAT_NTERNOP
2941

    
2942
/* MIPS specific binary operations */
2943
uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
2944
{
2945
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2946
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2947
    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
2948
    update_fcr31();
2949
    return fdt2;
2950
}
2951

    
2952
uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
2953
{
2954
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2955
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2956
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2957
    update_fcr31();
2958
    return fst2;
2959
}
2960

    
2961
uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
2962
{
2963
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2964
    uint32_t fsth0 = fdt0 >> 32;
2965
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
2966
    uint32_t fsth2 = fdt2 >> 32;
2967

    
2968
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2969
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2970
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
2971
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2972
    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
2973
    update_fcr31();
2974
    return ((uint64_t)fsth2 << 32) | fst2;
2975
}
2976

    
2977
uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
2978
{
2979
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2980
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2981
    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
2982
    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
2983
    update_fcr31();
2984
    return fdt2;
2985
}
2986

    
2987
uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
2988
{
2989
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2990
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2991
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
2992
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
2993
    update_fcr31();
2994
    return fst2;
2995
}
2996

    
2997
/* RSQRT2.PS: per-half Newton-Raphson reciprocal-square-root refinement,
 * computing -(a*b - 1)/2 on each 32-bit single of the paired operands.
 * Exception flags from all steps are folded into FCR31.
 */
uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}
3014

    
3015
uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
3016
{
3017
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3018
    uint32_t fsth0 = fdt0 >> 32;
3019
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3020
    uint32_t fsth1 = fdt1 >> 32;
3021
    uint32_t fst2;
3022
    uint32_t fsth2;
3023

    
3024
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3025
    fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
3026
    fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
3027
    update_fcr31();
3028
    return ((uint64_t)fsth2 << 32) | fst2;
3029
}
3030

    
3031
uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
3032
{
3033
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3034
    uint32_t fsth0 = fdt0 >> 32;
3035
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3036
    uint32_t fsth1 = fdt1 >> 32;
3037
    uint32_t fst2;
3038
    uint32_t fsth2;
3039

    
3040
    set_float_exception_flags(0, &env->active_fpu.fp_status);
3041
    fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
3042
    fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
3043
    update_fcr31();
3044
    return ((uint64_t)fsth2 << 32) | fst2;
3045
}
3046

    
3047
/* Compare operations.
 *
 * FOP_COND_D(op, cond) emits the C.cond.D and CABS.cond.D helpers.
 * Each clears the softfloat flags, evaluates "cond" (an expression over
 * fdt0/fdt1), folds the flags into FCR31, and sets or clears condition
 * code bit "cc".  The CABS variant first strips both sign bits
 * (float64_abs) so magnitudes are compared.  Comments live outside the
 * macro so the expansion is byte-identical.
 */
#define FOP_COND_D(op, cond)                                   \
void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                              \
    int c;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                              \
    int c;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);  \
    fdt0 = float64_abs(fdt0);                                  \
    fdt1 = float64_abs(fdt1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}
3073

    
3074
/* The 16 MIPS FP condition codes for doubles.  The first eight use the
 * _quiet comparisons (no Invalid on quiet NaNs); the last eight use the
 * signaling comparisons.
 */
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float64_unordered_quiet() is still called. */
FOP_COND_D(f,   (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(un,  float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(eq,  float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float64_unordered() is still called. */
FOP_COND_D(sf,  (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(lt,  float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(le,  float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3094

    
3095
/* FOP_COND_S(op, cond): single-precision twin of FOP_COND_D above —
 * emits C.cond.S and CABS.cond.S helpers with identical flag and
 * condition-code handling, using float32_abs for the CABS variant.
 */
#define FOP_COND_S(op, cond)                                   \
void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
{                                                              \
    int c;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
{                                                              \
    int c;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);  \
    fst0 = float32_abs(fst0);                                  \
    fst1 = float32_abs(fst1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}
3120

    
3121
/* The 16 MIPS FP condition codes for singles; quiet comparisons first,
 * signaling comparisons second, mirroring the double variants above.
 */
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered_quiet() is still called. */
FOP_COND_S(f,   (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(un,  float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(eq,  float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)  || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)  || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)  || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered() is still called. */
FOP_COND_S(sf,  (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(lt,  float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(le,  float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
3141

    
3142
/* FOP_COND_PS(op, condl, condh): paired-single twin of FOP_COND_D/S —
 * emits C.cond.PS and CABS.cond.PS helpers.  "condl"/"condh" are
 * evaluated over the low (fst0/fst1) and high (fsth0/fsth1) halves and
 * drive condition-code bits cc and cc+1 respectively.
 *
 * Fix: helper_cmpabs_ps_## op previously omitted the
 * set_float_exception_flags() reset performed by helper_cmp_ps_## op and
 * by both cmpabs variants of FOP_COND_D/S, so stale softfloat flags from
 * an earlier FP operation could be folded into FCR31 by update_fcr31().
 * The reset is now applied here as well, before the comparisons.
 */
#define FOP_COND_PS(op, condl, condh)                           \
void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                               \
    uint32_t fst0, fsth0, fst1, fsth1;                          \
    int ch, cl;                                                 \
    set_float_exception_flags(0, &env->active_fpu.fp_status);   \
    fst0 = fdt0 & 0XFFFFFFFF;                                   \
    fsth0 = fdt0 >> 32;                                         \
    fst1 = fdt1 & 0XFFFFFFFF;                                   \
    fsth1 = fdt1 >> 32;                                         \
    cl = condl;                                                 \
    ch = condh;                                                 \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}                                                               \
void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                               \
    uint32_t fst0, fsth0, fst1, fsth1;                          \
    int ch, cl;                                                 \
    set_float_exception_flags(0, &env->active_fpu.fp_status);   \
    fst0 = float32_abs(fdt0 & 0XFFFFFFFF);                      \
    fsth0 = float32_abs(fdt0 >> 32);                            \
    fst1 = float32_abs(fdt1 & 0XFFFFFFFF);                      \
    fsth1 = float32_abs(fdt1 >> 32);                            \
    cl = condl;                                                 \
    ch = condh;                                                 \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}
3184

    
3185
/* The 16 MIPS FP condition codes for paired singles; each instantiation
 * supplies the low-half condition then the high-half condition.  Quiet
 * comparisons first, signaling comparisons second, as for D and S.
 */
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered_quiet() is still called. */
FOP_COND_PS(f,   (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(un,  float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(eq,  float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)    || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)    || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)    || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered() is still called. */
FOP_COND_PS(sf,  (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(lt,  float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(le,  float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))