target-mips/op_helper.c @ bfb811ad

1
/*
2
 *  MIPS emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2004-2005 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <stdlib.h>
20
#include "exec.h"
21

    
22
#include "host-utils.h"
23

    
24
#include "helper.h"
25
/*****************************************************************************/
26
/* Exceptions processing helpers */
27

    
28
void helper_raise_exception_err (uint32_t exception, int error_code)
29
{
30
#if 1
31
    if (exception < 0x100)
32
        qemu_log("%s: %d %d\n", __func__, exception, error_code);
33
#endif
34
    env->exception_index = exception;
35
    env->error_code = error_code;
36
    cpu_loop_exit();
37
}
38

    
39
void helper_raise_exception (uint32_t exception)
40
{
41
    helper_raise_exception_err(exception, 0);
42
}
43

    
44
void helper_interrupt_restart (void)
45
{
46
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
47
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
48
        !(env->hflags & MIPS_HFLAG_DM) &&
49
        (env->CP0_Status & (1 << CP0St_IE)) &&
50
        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
51
        env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
52
        helper_raise_exception(EXCP_EXT_INTERRUPT);
53
    }
54
}
55

    
56
#if !defined(CONFIG_USER_ONLY)
57
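/* Re-derive the guest CPU state from the host PC of a faulting memory
   access, using the translation block that contains that address.  */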
static void do_restore_state (void *pc_ptr)
58
{
59
    TranslationBlock *tb;
60
    unsigned long pc = (unsigned long) pc_ptr;
61
    
62
    tb = tb_find_pc (pc);
63
    if (tb) {
64
        cpu_restore_state (tb, env, pc, NULL);
65
    }
66
}
67
#endif
68

    
69
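/* Memory access wrappers used by the helpers below.  In system mode,
   mem_idx selects the softmmu access variant: 0 = kernel, 1 = supervisor,
   2 (default) = user.  In user mode the _raw accessors are used directly.  */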
#if defined(CONFIG_USER_ONLY)
70
#define HELPER_LD(name, insn, type)                                     \
71
static inline type do_##name(target_ulong addr, int mem_idx)            \
72
{                                                                       \
73
    return (type) insn##_raw(addr);                                     \
74
}
75
#else
76
#define HELPER_LD(name, insn, type)                                     \
77
static inline type do_##name(target_ulong addr, int mem_idx)            \
78
{                                                                       \
79
    switch (mem_idx)                                                    \
80
    {                                                                   \
81
    case 0: return (type) insn##_kernel(addr); break;                   \
82
    case 1: return (type) insn##_super(addr); break;                    \
83
    default:                                                            \
84
    case 2: return (type) insn##_user(addr); break;                     \
85
    }                                                                   \
86
}
87
#endif
88
HELPER_LD(lbu, ldub, uint8_t)
89
HELPER_LD(lw, ldl, int32_t)
90
#ifdef TARGET_MIPS64
91
HELPER_LD(ld, ldq, int64_t)
92
#endif
93
#undef HELPER_LD
94

    
95
#if defined(CONFIG_USER_ONLY)
96
#define HELPER_ST(name, insn, type)                                     \
97
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
98
{                                                                       \
99
    insn##_raw(addr, val);                                              \
100
}
101
#else
102
#define HELPER_ST(name, insn, type)                                     \
103
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
104
{                                                                       \
105
    switch (mem_idx)                                                    \
106
    {                                                                   \
107
    case 0: insn##_kernel(addr, val); break;                            \
108
    case 1: insn##_super(addr, val); break;                             \
109
    default:                                                            \
110
    case 2: insn##_user(addr, val); break;                              \
111
    }                                                                   \
112
}
113
#endif
114
HELPER_ST(sb, stb, uint8_t)
115
HELPER_ST(sw, stl, uint32_t)
116
#ifdef TARGET_MIPS64
117
HELPER_ST(sd, stq, uint64_t)
118
#endif
119
#undef HELPER_ST
120

    
121
target_ulong helper_clo (target_ulong arg1)
122
{
123
    return clo32(arg1);
124
}
125

    
126
target_ulong helper_clz (target_ulong arg1)
127
{
128
    return clz32(arg1);
129
}
130

    
131
#if defined(TARGET_MIPS64)
132
target_ulong helper_dclo (target_ulong arg1)
133
{
134
    return clo64(arg1);
135
}
136

    
137
target_ulong helper_dclz (target_ulong arg1)
138
{
139
    return clz64(arg1);
140
}
141
#endif /* TARGET_MIPS64 */
142

    
143
/* 64-bit arithmetic for 32-bit hosts */
144
static inline uint64_t get_HILO (void)
145
{
146
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
147
}
148

    
149
static inline void set_HILO (uint64_t HILO)
150
{
151
    env->active_tc.LO[0] = (int32_t)HILO;
152
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
153
}
154

    
155
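/* Note: arg1 is passed by value, so the assignment to arg1 in the two
   helpers below only updates HI[0]/LO[0] in env; the caller's copy of the
   register value is left unchanged.  */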
static inline void set_HIT0_LO (target_ulong arg1, uint64_t HILO)
156
{
157
    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
158
    arg1 = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
159
}
160

    
161
static inline void set_HI_LOT0 (target_ulong arg1, uint64_t HILO)
162
{
163
    arg1 = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
164
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
165
}
166

    
167
/* Multiplication variants of the vr54xx. */
168
target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
169
{
170
    set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
171

    
172
    return arg1;
173
}
174

    
175
target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
176
{
177
    set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
178

    
179
    return arg1;
180
}
181

    
182
target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
183
{
184
    set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
185

    
186
    return arg1;
187
}
188

    
189
target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
190
{
191
    set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
192

    
193
    return arg1;
194
}
195

    
196
target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
197
{
198
    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
199

    
200
    return arg1;
201
}
202

    
203
target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
204
{
205
    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
206

    
207
    return arg1;
208
}
209

    
210
target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
211
{
212
    set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
213

    
214
    return arg1;
215
}
216

    
217
target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
218
{
219
    set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
220

    
221
    return arg1;
222
}
223

    
224
target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
225
{
226
    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
227

    
228
    return arg1;
229
}
230

    
231
target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
232
{
233
    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
234

    
235
    return arg1;
236
}
237

    
238
target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
239
{
240
    set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
241

    
242
    return arg1;
243
}
244

    
245
target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
246
{
247
    set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
248

    
249
    return arg1;
250
}
251

    
252
target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
253
{
254
    set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
255

    
256
    return arg1;
257
}
258

    
259
target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
260
{
261
    set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
262

    
263
    return arg1;
264
}
265

    
266
#ifdef TARGET_MIPS64
267
void helper_dmult (target_ulong arg1, target_ulong arg2)
268
{
269
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
270
}
271

    
272
void helper_dmultu (target_ulong arg1, target_ulong arg2)
273
{
274
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
275
}
276
#endif
277

    
278
#ifndef CONFIG_USER_ONLY
279
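/* LL/SC emulation.  LL records the translated address and the loaded value
   in env->lladdr/env->llval; SC stores only if the (aligned) address still
   translates to the recorded physical address and the memory contents are
   unchanged, returning 1 on success and 0 on failure.  */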
#define HELPER_LD_ATOMIC(name, insn)                                          \
280
target_ulong helper_##name(target_ulong arg, int mem_idx)                     \
281
{                                                                             \
282
    env->lladdr = do_translate_address(env, arg, 0);                          \
283
    env->llval = do_##insn(arg, mem_idx);                                     \
284
    return env->llval;                                                        \
285
}
286
HELPER_LD_ATOMIC(ll, lw)
287
#ifdef TARGET_MIPS64
288
HELPER_LD_ATOMIC(lld, ld)
289
#endif
290
#undef HELPER_LD_ATOMIC
291

    
292
#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
293
target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
294
{                                                                             \
295
    target_long tmp;                                                          \
296
                                                                              \
297
    if (arg2 & almask) {                                                      \
298
        env->CP0_BadVAddr = arg2;                                             \
299
        helper_raise_exception(EXCP_AdES);                                    \
300
    }                                                                         \
301
    if (do_translate_address(env, arg2, 1) == env->lladdr) {                  \
302
        tmp = do_##ld_insn(arg2, mem_idx);                                    \
303
        if (tmp == env->llval) {                                              \
304
            do_##st_insn(arg2, arg1, mem_idx);                                \
305
            return 1;                                                         \
306
        }                                                                     \
307
    }                                                                         \
308
    return 0;                                                                 \
309
}
310
HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
311
#ifdef TARGET_MIPS64
312
HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
313
#endif
314
#undef HELPER_ST_ATOMIC
315
#endif
316

    
317
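/* Byte-lane helpers for the unaligned access instructions (LWL/LWR, SWL/SWR
   and the 64-bit LDL/LDR, SDL/SDR).  GET_LMASK gives the byte offset of the
   addressed byte within its word, mirrored on little-endian targets;
   GET_OFFSET steps towards the remaining bytes of the word.  */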
#ifdef TARGET_WORDS_BIGENDIAN
318
#define GET_LMASK(v) ((v) & 3)
319
#define GET_OFFSET(addr, offset) (addr + (offset))
320
#else
321
#define GET_LMASK(v) (((v) & 3) ^ 3)
322
#define GET_OFFSET(addr, offset) (addr - (offset))
323
#endif
324

    
325
target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
326
{
327
    target_ulong tmp;
328

    
329
    tmp = do_lbu(arg2, mem_idx);
330
    arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
331

    
332
    if (GET_LMASK(arg2) <= 2) {
333
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
334
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
335
    }
336

    
337
    if (GET_LMASK(arg2) <= 1) {
338
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
339
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
340
    }
341

    
342
    if (GET_LMASK(arg2) == 0) {
343
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
344
        arg1 = (arg1 & 0xFFFFFF00) | tmp;
345
    }
346
    return (int32_t)arg1;
347
}
348

    
349
target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
350
{
351
    target_ulong tmp;
352

    
353
    tmp = do_lbu(arg2, mem_idx);
354
    arg1 = (arg1 & 0xFFFFFF00) | tmp;
355

    
356
    if (GET_LMASK(arg2) >= 1) {
357
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
358
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
359
    }
360

    
361
    if (GET_LMASK(arg2) >= 2) {
362
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
363
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
364
    }
365

    
366
    if (GET_LMASK(arg2) == 3) {
367
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
368
        arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
369
    }
370
    return (int32_t)arg1;
371
}
372

    
373
void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
374
{
375
    do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);
376

    
377
    if (GET_LMASK(arg2) <= 2)
378
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
379

    
380
    if (GET_LMASK(arg2) <= 1)
381
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
382

    
383
    if (GET_LMASK(arg2) == 0)
384
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
385
}
386

    
387
void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
388
{
389
    do_sb(arg2, (uint8_t)arg1, mem_idx);
390

    
391
    if (GET_LMASK(arg2) >= 1)
392
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
393

    
394
    if (GET_LMASK(arg2) >= 2)
395
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
396

    
397
    if (GET_LMASK(arg2) == 3)
398
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
399
}
400

    
401
#if defined(TARGET_MIPS64)
402
/* "half" load and stores.  We must do the memory access inline,
403
   or fault handling won't work.  */
404

    
405
#ifdef TARGET_WORDS_BIGENDIAN
406
#define GET_LMASK64(v) ((v) & 7)
407
#else
408
#define GET_LMASK64(v) (((v) & 7) ^ 7)
409
#endif
410

    
411
target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
412
{
413
    uint64_t tmp;
414

    
415
    tmp = do_lbu(arg2, mem_idx);
416
    arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
417

    
418
    if (GET_LMASK64(arg2) <= 6) {
419
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
420
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
421
    }
422

    
423
    if (GET_LMASK64(arg2) <= 5) {
424
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
425
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
426
    }
427

    
428
    if (GET_LMASK64(arg2) <= 4) {
429
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
430
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
431
    }
432

    
433
    if (GET_LMASK64(arg2) <= 3) {
434
        tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
435
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
436
    }
437

    
438
    if (GET_LMASK64(arg2) <= 2) {
439
        tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
440
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
441
    }
442

    
443
    if (GET_LMASK64(arg2) <= 1) {
444
        tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
445
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
446
    }
447

    
448
    if (GET_LMASK64(arg2) == 0) {
449
        tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
450
        arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
451
    }
452

    
453
    return arg1;
454
}
455

    
456
target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
457
{
458
    uint64_t tmp;
459

    
460
    tmp = do_lbu(arg2, mem_idx);
461
    arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
462

    
463
    if (GET_LMASK64(arg2) >= 1) {
464
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
465
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp  << 8);
466
    }
467

    
468
    if (GET_LMASK64(arg2) >= 2) {
469
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
470
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
471
    }
472

    
473
    if (GET_LMASK64(arg2) >= 3) {
474
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
475
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
476
    }
477

    
478
    if (GET_LMASK64(arg2) >= 4) {
479
        tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
480
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
481
    }
482

    
483
    if (GET_LMASK64(arg2) >= 5) {
484
        tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
485
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
486
    }
487

    
488
    if (GET_LMASK64(arg2) >= 6) {
489
        tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
490
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
491
    }
492

    
493
    if (GET_LMASK64(arg2) == 7) {
494
        tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
495
        arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
496
    }
497

    
498
    return arg1;
499
}
500

    
501
void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
502
{
503
    do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);
504

    
505
    if (GET_LMASK64(arg2) <= 6)
506
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
507

    
508
    if (GET_LMASK64(arg2) <= 5)
509
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
510

    
511
    if (GET_LMASK64(arg2) <= 4)
512
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
513

    
514
    if (GET_LMASK64(arg2) <= 3)
515
        do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
516

    
517
    if (GET_LMASK64(arg2) <= 2)
518
        do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
519

    
520
    if (GET_LMASK64(arg2) <= 1)
521
        do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
522

    
523
    if (GET_LMASK64(arg2) <= 0)
524
        do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
525
}
526

    
527
void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
528
{
529
    do_sb(arg2, (uint8_t)arg1, mem_idx);
530

    
531
    if (GET_LMASK64(arg2) >= 1)
532
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
533

    
534
    if (GET_LMASK64(arg2) >= 2)
535
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
536

    
537
    if (GET_LMASK64(arg2) >= 3)
538
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
539

    
540
    if (GET_LMASK64(arg2) >= 4)
541
        do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);
542

    
543
    if (GET_LMASK64(arg2) >= 5)
544
        do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);
545

    
546
    if (GET_LMASK64(arg2) >= 6)
547
        do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);
548

    
549
    if (GET_LMASK64(arg2) == 7)
550
        do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
551
}
552
#endif /* TARGET_MIPS64 */
553

    
554
#ifndef CONFIG_USER_ONLY
555
/* CP0 helpers */
556
target_ulong helper_mfc0_mvpcontrol (void)
557
{
558
    return env->mvp->CP0_MVPControl;
559
}
560

    
561
target_ulong helper_mfc0_mvpconf0 (void)
562
{
563
    return env->mvp->CP0_MVPConf0;
564
}
565

    
566
target_ulong helper_mfc0_mvpconf1 (void)
567
{
568
    return env->mvp->CP0_MVPConf1;
569
}
570

    
571
target_ulong helper_mfc0_random (void)
572
{
573
    return (int32_t)cpu_mips_get_random(env);
574
}
575

    
576
target_ulong helper_mfc0_tcstatus (void)
577
{
578
    return env->active_tc.CP0_TCStatus;
579
}
580

    
581
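/* The mftc0/mttc0 helpers act on the TC selected by VPEControl.TargTC and
   fall back to the active TC when it designates the currently running one.  */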
target_ulong helper_mftc0_tcstatus(void)
582
{
583
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
584

    
585
    if (other_tc == env->current_tc)
586
        return env->active_tc.CP0_TCStatus;
587
    else
588
        return env->tcs[other_tc].CP0_TCStatus;
589
}
590

    
591
target_ulong helper_mfc0_tcbind (void)
592
{
593
    return env->active_tc.CP0_TCBind;
594
}
595

    
596
target_ulong helper_mftc0_tcbind(void)
597
{
598
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
599

    
600
    if (other_tc == env->current_tc)
601
        return env->active_tc.CP0_TCBind;
602
    else
603
        return env->tcs[other_tc].CP0_TCBind;
604
}
605

    
606
target_ulong helper_mfc0_tcrestart (void)
607
{
608
    return env->active_tc.PC;
609
}
610

    
611
target_ulong helper_mftc0_tcrestart(void)
612
{
613
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
614

    
615
    if (other_tc == env->current_tc)
616
        return env->active_tc.PC;
617
    else
618
        return env->tcs[other_tc].PC;
619
}
620

    
621
target_ulong helper_mfc0_tchalt (void)
622
{
623
    return env->active_tc.CP0_TCHalt;
624
}
625

    
626
target_ulong helper_mftc0_tchalt(void)
627
{
628
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
629

    
630
    if (other_tc == env->current_tc)
631
        return env->active_tc.CP0_TCHalt;
632
    else
633
        return env->tcs[other_tc].CP0_TCHalt;
634
}
635

    
636
target_ulong helper_mfc0_tccontext (void)
637
{
638
    return env->active_tc.CP0_TCContext;
639
}
640

    
641
target_ulong helper_mftc0_tccontext(void)
642
{
643
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
644

    
645
    if (other_tc == env->current_tc)
646
        return env->active_tc.CP0_TCContext;
647
    else
648
        return env->tcs[other_tc].CP0_TCContext;
649
}
650

    
651
target_ulong helper_mfc0_tcschedule (void)
652
{
653
    return env->active_tc.CP0_TCSchedule;
654
}
655

    
656
target_ulong helper_mftc0_tcschedule(void)
657
{
658
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
659

    
660
    if (other_tc == env->current_tc)
661
        return env->active_tc.CP0_TCSchedule;
662
    else
663
        return env->tcs[other_tc].CP0_TCSchedule;
664
}
665

    
666
target_ulong helper_mfc0_tcschefback (void)
667
{
668
    return env->active_tc.CP0_TCScheFBack;
669
}
670

    
671
target_ulong helper_mftc0_tcschefback(void)
672
{
673
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
674

    
675
    if (other_tc == env->current_tc)
676
        return env->active_tc.CP0_TCScheFBack;
677
    else
678
        return env->tcs[other_tc].CP0_TCScheFBack;
679
}
680

    
681
target_ulong helper_mfc0_count (void)
682
{
683
    return (int32_t)cpu_mips_get_count(env);
684
}
685

    
686
target_ulong helper_mftc0_entryhi(void)
687
{
688
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
689
    int32_t tcstatus;
690

    
691
    if (other_tc == env->current_tc)
692
        tcstatus = env->active_tc.CP0_TCStatus;
693
    else
694
        tcstatus = env->tcs[other_tc].CP0_TCStatus;
695

    
696
    return (env->CP0_EntryHi & ~0xff) | (tcstatus & 0xff);
697
}
698

    
699
target_ulong helper_mftc0_status(void)
700
{
701
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
702
    target_ulong t0;
703
    int32_t tcstatus;
704

    
705
    if (other_tc == env->current_tc)
706
        tcstatus = env->active_tc.CP0_TCStatus;
707
    else
708
        tcstatus = env->tcs[other_tc].CP0_TCStatus;
709

    
710
    t0 = env->CP0_Status & ~0xf1000018;
711
    t0 |= tcstatus & (0xf << CP0TCSt_TCU0);
712
    t0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
713
    t0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);
714

    
715
    return t0;
716
}
717

    
718
target_ulong helper_mfc0_lladdr (void)
719
{
720
    return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
721
}
722

    
723
target_ulong helper_mfc0_watchlo (uint32_t sel)
724
{
725
    return (int32_t)env->CP0_WatchLo[sel];
726
}
727

    
728
target_ulong helper_mfc0_watchhi (uint32_t sel)
729
{
730
    return env->CP0_WatchHi[sel];
731
}
732

    
733
target_ulong helper_mfc0_debug (void)
734
{
735
    target_ulong t0 = env->CP0_Debug;
736
    if (env->hflags & MIPS_HFLAG_DM)
737
        t0 |= 1 << CP0DB_DM;
738

    
739
    return t0;
740
}
741

    
742
target_ulong helper_mftc0_debug(void)
743
{
744
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
745
    int32_t tcstatus;
746

    
747
    if (other_tc == env->current_tc)
748
        tcstatus = env->active_tc.CP0_Debug_tcstatus;
749
    else
750
        tcstatus = env->tcs[other_tc].CP0_Debug_tcstatus;
751

    
752
    /* XXX: Might be wrong, check with EJTAG spec. */
753
    return (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
754
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
755
}
756

    
757
#if defined(TARGET_MIPS64)
758
target_ulong helper_dmfc0_tcrestart (void)
759
{
760
    return env->active_tc.PC;
761
}
762

    
763
target_ulong helper_dmfc0_tchalt (void)
764
{
765
    return env->active_tc.CP0_TCHalt;
766
}
767

    
768
target_ulong helper_dmfc0_tccontext (void)
769
{
770
    return env->active_tc.CP0_TCContext;
771
}
772

    
773
target_ulong helper_dmfc0_tcschedule (void)
774
{
775
    return env->active_tc.CP0_TCSchedule;
776
}
777

    
778
target_ulong helper_dmfc0_tcschefback (void)
779
{
780
    return env->active_tc.CP0_TCScheFBack;
781
}
782

    
783
target_ulong helper_dmfc0_lladdr (void)
784
{
785
    return env->lladdr >> env->CP0_LLAddr_shift;
786
}
787

    
788
target_ulong helper_dmfc0_watchlo (uint32_t sel)
789
{
790
    return env->CP0_WatchLo[sel];
791
}
792
#endif /* TARGET_MIPS64 */
793

    
794
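/* Writing Index: preserve the probe failure bit (bit 31) and clamp the
   written value with a power-of-two mask that covers all TLB entries.  */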
void helper_mtc0_index (target_ulong arg1)
795
{
796
    int num = 1;
797
    unsigned int tmp = env->tlb->nb_tlb;
798

    
799
    do {
800
        tmp >>= 1;
801
        num <<= 1;
802
    } while (tmp);
803
    env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
804
}
805

    
806
void helper_mtc0_mvpcontrol (target_ulong arg1)
807
{
808
    uint32_t mask = 0;
809
    uint32_t newval;
810

    
811
    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
812
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
813
                (1 << CP0MVPCo_EVP);
814
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
815
        mask |= (1 << CP0MVPCo_STLB);
816
    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
817

    
818
    // TODO: Enable/disable shared TLB, enable/disable VPEs.
819

    
820
    env->mvp->CP0_MVPControl = newval;
821
}
822

    
823
void helper_mtc0_vpecontrol (target_ulong arg1)
824
{
825
    uint32_t mask;
826
    uint32_t newval;
827

    
828
    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
829
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
830
    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
831

    
832
    /* Yield scheduler intercept not implemented. */
833
    /* Gating storage scheduler intercept not implemented. */
834

    
835
    // TODO: Enable/disable TCs.
836

    
837
    env->CP0_VPEControl = newval;
838
}
839

    
840
void helper_mtc0_vpeconf0 (target_ulong arg1)
841
{
842
    uint32_t mask = 0;
843
    uint32_t newval;
844

    
845
    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
846
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
847
            mask |= (0xff << CP0VPEC0_XTC);
848
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
849
    }
850
    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
851

    
852
    // TODO: TC exclusive handling due to ERL/EXL.
853

    
854
    env->CP0_VPEConf0 = newval;
855
}
856

    
857
void helper_mtc0_vpeconf1 (target_ulong arg1)
858
{
859
    uint32_t mask = 0;
860
    uint32_t newval;
861

    
862
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
863
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
864
                (0xff << CP0VPEC1_NCP1);
865
    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
866

    
867
    /* UDI not implemented. */
868
    /* CP2 not implemented. */
869

    
870
    // TODO: Handle FPU (CP1) binding.
871

    
872
    env->CP0_VPEConf1 = newval;
873
}
874

    
875
void helper_mtc0_yqmask (target_ulong arg1)
876
{
877
    /* Yield qualifier inputs not implemented. */
878
    env->CP0_YQMask = 0x00000000;
879
}
880

    
881
void helper_mtc0_vpeopt (target_ulong arg1)
882
{
883
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
884
}
885

    
886
void helper_mtc0_entrylo0 (target_ulong arg1)
887
{
888
    /* Large physaddr (PABITS) not implemented */
889
    /* 1k pages not implemented */
890
    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
891
}
892

    
893
void helper_mtc0_tcstatus (target_ulong arg1)
894
{
895
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
896
    uint32_t newval;
897

    
898
    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
899

    
900
    // TODO: Sync with CP0_Status.
901

    
902
    env->active_tc.CP0_TCStatus = newval;
903
}
904

    
905
void helper_mttc0_tcstatus (target_ulong arg1)
906
{
907
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
908

    
909
    // TODO: Sync with CP0_Status.
910

    
911
    if (other_tc == env->current_tc)
912
        env->active_tc.CP0_TCStatus = arg1;
913
    else
914
        env->tcs[other_tc].CP0_TCStatus = arg1;
915
}
916

    
917
void helper_mtc0_tcbind (target_ulong arg1)
918
{
919
    uint32_t mask = (1 << CP0TCBd_TBE);
920
    uint32_t newval;
921

    
922
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
923
        mask |= (1 << CP0TCBd_CurVPE);
924
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
925
    env->active_tc.CP0_TCBind = newval;
926
}
927

    
928
void helper_mttc0_tcbind (target_ulong arg1)
929
{
930
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
931
    uint32_t mask = (1 << CP0TCBd_TBE);
932
    uint32_t newval;
933

    
934
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
935
        mask |= (1 << CP0TCBd_CurVPE);
936
    if (other_tc == env->current_tc) {
937
        newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
938
        env->active_tc.CP0_TCBind = newval;
939
    } else {
940
        newval = (env->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
941
        env->tcs[other_tc].CP0_TCBind = newval;
942
    }
943
}
944

    
945
void helper_mtc0_tcrestart (target_ulong arg1)
946
{
947
    env->active_tc.PC = arg1;
948
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
949
    env->lladdr = 0ULL;
950
    /* MIPS16 not implemented. */
951
}
952

    
953
void helper_mttc0_tcrestart (target_ulong arg1)
954
{
955
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
956

    
957
    if (other_tc == env->current_tc) {
958
        env->active_tc.PC = arg1;
959
        env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
960
        env->lladdr = 0ULL;
961
        /* MIPS16 not implemented. */
962
    } else {
963
        env->tcs[other_tc].PC = arg1;
964
        env->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
965
        env->lladdr = 0ULL;
966
        /* MIPS16 not implemented. */
967
    }
968
}
969

    
970
void helper_mtc0_tchalt (target_ulong arg1)
971
{
972
    env->active_tc.CP0_TCHalt = arg1 & 0x1;
973

    
974
    // TODO: Halt TC / Restart (if allocated+active) TC.
975
}
976

    
977
void helper_mttc0_tchalt (target_ulong arg1)
978
{
979
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
980

    
981
    // TODO: Halt TC / Restart (if allocated+active) TC.
982

    
983
    if (other_tc == env->current_tc)
984
        env->active_tc.CP0_TCHalt = arg1;
985
    else
986
        env->tcs[other_tc].CP0_TCHalt = arg1;
987
}
988

    
989
void helper_mtc0_tccontext (target_ulong arg1)
990
{
991
    env->active_tc.CP0_TCContext = arg1;
992
}
993

    
994
void helper_mttc0_tccontext (target_ulong arg1)
995
{
996
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
997

    
998
    if (other_tc == env->current_tc)
999
        env->active_tc.CP0_TCContext = arg1;
1000
    else
1001
        env->tcs[other_tc].CP0_TCContext = arg1;
1002
}
1003

    
1004
void helper_mtc0_tcschedule (target_ulong arg1)
1005
{
1006
    env->active_tc.CP0_TCSchedule = arg1;
1007
}
1008

    
1009
void helper_mttc0_tcschedule (target_ulong arg1)
1010
{
1011
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1012

    
1013
    if (other_tc == env->current_tc)
1014
        env->active_tc.CP0_TCSchedule = arg1;
1015
    else
1016
        env->tcs[other_tc].CP0_TCSchedule = arg1;
1017
}
1018

    
1019
void helper_mtc0_tcschefback (target_ulong arg1)
1020
{
1021
    env->active_tc.CP0_TCScheFBack = arg1;
1022
}
1023

    
1024
void helper_mttc0_tcschefback (target_ulong arg1)
1025
{
1026
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1027

    
1028
    if (other_tc == env->current_tc)
1029
        env->active_tc.CP0_TCScheFBack = arg1;
1030
    else
1031
        env->tcs[other_tc].CP0_TCScheFBack = arg1;
1032
}
1033

    
1034
void helper_mtc0_entrylo1 (target_ulong arg1)
1035
{
1036
    /* Large physaddr (PABITS) not implemented */
1037
    /* 1k pages not implemented */
1038
    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
1039
}
1040

    
1041
void helper_mtc0_context (target_ulong arg1)
1042
{
1043
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
1044
}
1045

    
1046
void helper_mtc0_pagemask (target_ulong arg1)
1047
{
1048
    /* 1k pages not implemented */
1049
    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
1050
}
1051

    
1052
void helper_mtc0_pagegrain (target_ulong arg1)
1053
{
1054
    /* SmartMIPS not implemented */
1055
    /* Large physaddr (PABITS) not implemented */
1056
    /* 1k pages not implemented */
1057
    env->CP0_PageGrain = 0;
1058
}
1059

    
1060
void helper_mtc0_wired (target_ulong arg1)
1061
{
1062
    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
1063
}
1064

    
1065
void helper_mtc0_srsconf0 (target_ulong arg1)
1066
{
1067
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
1068
}
1069

    
1070
void helper_mtc0_srsconf1 (target_ulong arg1)
1071
{
1072
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
1073
}
1074

    
1075
void helper_mtc0_srsconf2 (target_ulong arg1)
1076
{
1077
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
1078
}
1079

    
1080
void helper_mtc0_srsconf3 (target_ulong arg1)
1081
{
1082
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
1083
}
1084

    
1085
void helper_mtc0_srsconf4 (target_ulong arg1)
1086
{
1087
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
1088
}
1089

    
1090
void helper_mtc0_hwrena (target_ulong arg1)
1091
{
1092
    env->CP0_HWREna = arg1 & 0x0000000F;
1093
}
1094

    
1095
void helper_mtc0_count (target_ulong arg1)
1096
{
1097
    cpu_mips_store_count(env, arg1);
1098
}
1099

    
1100
void helper_mtc0_entryhi (target_ulong arg1)
1101
{
1102
    target_ulong old, val;
1103

    
1104
    /* 1k pages not implemented */
1105
    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
1106
#if defined(TARGET_MIPS64)
1107
    val &= env->SEGMask;
1108
#endif
1109
    old = env->CP0_EntryHi;
1110
    env->CP0_EntryHi = val;
1111
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1112
        uint32_t tcst = env->active_tc.CP0_TCStatus & ~0xff;
1113
        env->active_tc.CP0_TCStatus = tcst | (val & 0xff);
1114
    }
1115
    /* If the ASID changes, flush qemu's TLB.  */
1116
    if ((old & 0xFF) != (val & 0xFF))
1117
        cpu_mips_tlb_flush(env, 1);
1118
}
1119

    
1120
void helper_mttc0_entryhi(target_ulong arg1)
1121
{
1122
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1123
    int32_t tcstatus;
1124

    
1125
    env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (arg1 & ~0xff);
1126
    if (other_tc == env->current_tc) {
1127
        tcstatus = (env->active_tc.CP0_TCStatus & ~0xff) | (arg1 & 0xff);
1128
        env->active_tc.CP0_TCStatus = tcstatus;
1129
    } else {
1130
        tcstatus = (env->tcs[other_tc].CP0_TCStatus & ~0xff) | (arg1 & 0xff);
1131
        env->tcs[other_tc].CP0_TCStatus = tcstatus;
1132
    }
1133
}
1134

    
1135
void helper_mtc0_compare (target_ulong arg1)
1136
{
1137
    cpu_mips_store_compare(env, arg1);
1138
}
1139

    
1140
void helper_mtc0_status (target_ulong arg1)
1141
{
1142
    uint32_t val, old;
1143
    uint32_t mask = env->CP0_Status_rw_bitmask;
1144

    
1145
    val = arg1 & mask;
1146
    old = env->CP0_Status;
1147
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
1148
    compute_hflags(env);
1149
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1150
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
1151
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
1152
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
1153
                env->CP0_Cause);
1154
        switch (env->hflags & MIPS_HFLAG_KSU) {
1155
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1156
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1157
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
1158
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1159
        }
1160
    }
1161
    cpu_mips_update_irq(env);
1162
}
1163

    
1164
void helper_mttc0_status(target_ulong arg1)
1165
{
1166
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1167
    int32_t tcstatus = env->tcs[other_tc].CP0_TCStatus;
1168

    
1169
    env->CP0_Status = arg1 & ~0xf1000018;
1170
    tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (arg1 & (0xf << CP0St_CU0));
1171
    tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((arg1 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
1172
    tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((arg1 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
1173
    if (other_tc == env->current_tc)
1174
        env->active_tc.CP0_TCStatus = tcstatus;
1175
    else
1176
        env->tcs[other_tc].CP0_TCStatus = tcstatus;
1177
}
1178

    
1179
void helper_mtc0_intctl (target_ulong arg1)
1180
{
1181
    /* vectored interrupts not implemented, no performance counters. */
1182
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (arg1 & 0x000002e0);
1183
}
1184

    
1185
void helper_mtc0_srsctl (target_ulong arg1)
1186
{
1187
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1188
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
1189
}
1190

    
1191
void helper_mtc0_cause (target_ulong arg1)
1192
{
1193
    uint32_t mask = 0x00C00300;
1194
    uint32_t old = env->CP0_Cause;
1195

    
1196
    if (env->insn_flags & ISA_MIPS32R2)
1197
        mask |= 1 << CP0Ca_DC;
1198

    
1199
    env->CP0_Cause = (env->CP0_Cause & ~mask) | (arg1 & mask);
1200

    
1201
    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
1202
        if (env->CP0_Cause & (1 << CP0Ca_DC))
1203
            cpu_mips_stop_count(env);
1204
        else
1205
            cpu_mips_start_count(env);
1206
    }
1207

    
1208
    /* Handle the software interrupt as a hardware one, as they
1209
       are very similar */
1210
    if (arg1 & CP0Ca_IP_mask) {
1211
        cpu_mips_update_irq(env);
1212
    }
1213
}
1214

    
1215
void helper_mtc0_ebase (target_ulong arg1)
1216
{
1217
    /* vectored interrupts not implemented */
1218
    /* Multi-CPU not implemented */
1219
    env->CP0_EBase = 0x80000000 | (arg1 & 0x3FFFF000);
1220
}
1221

    
1222
void helper_mtc0_config0 (target_ulong arg1)
1223
{
1224
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1225
}
1226

    
1227
void helper_mtc0_config2 (target_ulong arg1)
1228
{
1229
    /* tertiary/secondary caches not implemented */
1230
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1231
}
1232

    
1233
void helper_mtc0_lladdr (target_ulong arg1)
1234
{
1235
    target_long mask = env->CP0_LLAddr_rw_bitmask;
1236
    arg1 = arg1 << env->CP0_LLAddr_shift;
1237
    env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
1238
}
1239

    
1240
void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
1241
{
1242
    /* Watch exceptions for instructions, data loads, data stores
1243
       not implemented. */
1244
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1245
}
1246

    
1247
void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
1248
{
1249
    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
1250
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
1251
}
1252

    
1253
void helper_mtc0_xcontext (target_ulong arg1)
1254
{
1255
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1256
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1257
}
1258

    
1259
void helper_mtc0_framemask (target_ulong arg1)
1260
{
1261
    env->CP0_Framemask = arg1; /* XXX */
1262
}
1263

    
1264
void helper_mtc0_debug (target_ulong arg1)
1265
{
1266
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1267
    if (arg1 & (1 << CP0DB_DM))
1268
        env->hflags |= MIPS_HFLAG_DM;
1269
    else
1270
        env->hflags &= ~MIPS_HFLAG_DM;
1271
}
1272

    
1273
void helper_mttc0_debug(target_ulong arg1)
1274
{
1275
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1276
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1277

    
1278
    /* XXX: Might be wrong, check with EJTAG spec. */
1279
    if (other_tc == env->current_tc)
1280
        env->active_tc.CP0_Debug_tcstatus = val;
1281
    else
1282
        env->tcs[other_tc].CP0_Debug_tcstatus = val;
1283
    env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1284
                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1285
}
1286

    
1287
void helper_mtc0_performance0 (target_ulong arg1)
1288
{
1289
    env->CP0_Performance0 = arg1 & 0x000007ff;
1290
}
1291

    
1292
void helper_mtc0_taglo (target_ulong arg1)
1293
{
1294
    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1295
}
1296

    
1297
void helper_mtc0_datalo (target_ulong arg1)
1298
{
1299
    env->CP0_DataLo = arg1; /* XXX */
1300
}
1301

    
1302
void helper_mtc0_taghi (target_ulong arg1)
1303
{
1304
    env->CP0_TagHi = arg1; /* XXX */
1305
}
1306

    
1307
void helper_mtc0_datahi (target_ulong arg1)
1308
{
1309
    env->CP0_DataHi = arg1; /* XXX */
1310
}
1311

    
1312
/* MIPS MT functions */
1313
target_ulong helper_mftgpr(uint32_t sel)
1314
{
1315
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1316

    
1317
    if (other_tc == env->current_tc)
1318
        return env->active_tc.gpr[sel];
1319
    else
1320
        return env->tcs[other_tc].gpr[sel];
1321
}
1322

    
1323
target_ulong helper_mftlo(uint32_t sel)
1324
{
1325
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1326

    
1327
    if (other_tc == env->current_tc)
1328
        return env->active_tc.LO[sel];
1329
    else
1330
        return env->tcs[other_tc].LO[sel];
1331
}
1332

    
1333
target_ulong helper_mfthi(uint32_t sel)
1334
{
1335
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1336

    
1337
    if (other_tc == env->current_tc)
1338
        return env->active_tc.HI[sel];
1339
    else
1340
        return env->tcs[other_tc].HI[sel];
1341
}
1342

    
1343
target_ulong helper_mftacx(uint32_t sel)
1344
{
1345
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1346

    
1347
    if (other_tc == env->current_tc)
1348
        return env->active_tc.ACX[sel];
1349
    else
1350
        return env->tcs[other_tc].ACX[sel];
1351
}
1352

    
1353
target_ulong helper_mftdsp(void)
1354
{
1355
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1356

    
1357
    if (other_tc == env->current_tc)
1358
        return env->active_tc.DSPControl;
1359
    else
1360
        return env->tcs[other_tc].DSPControl;
1361
}
1362

    
1363
void helper_mttgpr(target_ulong arg1, uint32_t sel)
1364
{
1365
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1366

    
1367
    if (other_tc == env->current_tc)
1368
        env->active_tc.gpr[sel] = arg1;
1369
    else
1370
        env->tcs[other_tc].gpr[sel] = arg1;
1371
}
1372

    
1373
void helper_mttlo(target_ulong arg1, uint32_t sel)
1374
{
1375
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1376

    
1377
    if (other_tc == env->current_tc)
1378
        env->active_tc.LO[sel] = arg1;
1379
    else
1380
        env->tcs[other_tc].LO[sel] = arg1;
1381
}
1382

    
1383
void helper_mtthi(target_ulong arg1, uint32_t sel)
1384
{
1385
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1386

    
1387
    if (other_tc == env->current_tc)
1388
        env->active_tc.HI[sel] = arg1;
1389
    else
1390
        env->tcs[other_tc].HI[sel] = arg1;
1391
}
1392

    
1393
void helper_mttacx(target_ulong arg1, uint32_t sel)
1394
{
1395
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1396

    
1397
    if (other_tc == env->current_tc)
1398
        env->active_tc.ACX[sel] = arg1;
1399
    else
1400
        env->tcs[other_tc].ACX[sel] = arg1;
1401
}
1402

    
1403
void helper_mttdsp(target_ulong arg1)
1404
{
1405
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1406

    
1407
    if (other_tc == env->current_tc)
1408
        env->active_tc.DSPControl = arg1;
1409
    else
1410
        env->tcs[other_tc].DSPControl = arg1;
1411
}
1412

    
1413
/* MIPS MT functions */
1414
target_ulong helper_dmt(target_ulong arg1)
1415
{
1416
    // TODO
1417
    arg1 = 0;
1418
    // rt = arg1
1419

    
1420
    return arg1;
1421
}
1422

    
1423
target_ulong helper_emt(target_ulong arg1)
1424
{
1425
    // TODO
1426
    arg1 = 0;
1427
    // rt = arg1
1428

    
1429
    return arg1;
1430
}
1431

    
1432
target_ulong helper_dvpe(target_ulong arg1)
1433
{
1434
    // TODO
1435
    arg1 = 0;
1436
    // rt = arg1
1437

    
1438
    return arg1;
1439
}
1440

    
1441
target_ulong helper_evpe(target_ulong arg1)
1442
{
1443
    // TODO
1444
    arg1 = 0;
1445
    // rt = arg1
1446

    
1447
    return arg1;
1448
}
1449
#endif /* !CONFIG_USER_ONLY */
1450

    
1451
void helper_fork(target_ulong arg1, target_ulong arg2)
1452
{
1453
    // arg1 = rt, arg2 = rs
1454
    arg1 = 0;
1455
    // TODO: store to TC register
1456
}
1457

    
1458
target_ulong helper_yield(target_ulong arg1)
1459
{
1460
    if ((target_long)arg1 < 0) { /* compare rs as a signed value */
1461
        /* No scheduling policy implemented. */
1462
        if (arg1 != -2) {
1463
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
1464
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
1465
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1466
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
1467
                helper_raise_exception(EXCP_THREAD);
1468
            }
1469
        }
1470
    } else if (arg1 == 0) {
1471
        if (0 /* TODO: TC underflow */) {
1472
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1473
            helper_raise_exception(EXCP_THREAD);
1474
        } else {
1475
            // TODO: Deallocate TC
1476
        }
1477
    } else if (arg1 > 0) {
1478
        /* Yield qualifier inputs not implemented. */
1479
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1480
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
1481
        helper_raise_exception(EXCP_THREAD);
1482
    }
1483
    return env->CP0_YQMask;
1484
}
1485

    
1486
#ifndef CONFIG_USER_ONLY
1487
/* TLB management */
1488
void cpu_mips_tlb_flush (CPUState *env, int flush_global)
1489
{
1490
    /* Flush qemu's TLB and discard all shadowed entries.  */
1491
    tlb_flush (env, flush_global);
1492
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
1493
}
1494

    
1495
static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
1496
{
1497
    /* Discard entries from env->tlb[first] onwards.  */
1498
    while (env->tlb->tlb_in_use > first) {
1499
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
1500
    }
1501
}
1502

    
1503
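/* Copy CP0 EntryHi/PageMask/EntryLo0/EntryLo1 into the software r4k TLB
   entry at index idx.  */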
static void r4k_fill_tlb (int idx)
1504
{
1505
    r4k_tlb_t *tlb;
1506

    
1507
    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1508
    tlb = &env->tlb->mmu.r4k.tlb[idx];
1509
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
1510
#if defined(TARGET_MIPS64)
1511
    tlb->VPN &= env->SEGMask;
1512
#endif
1513
    tlb->ASID = env->CP0_EntryHi & 0xFF;
1514
    tlb->PageMask = env->CP0_PageMask;
1515
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
1516
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1517
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1518
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1519
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1520
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1521
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1522
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1523
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1524
}
1525

    
1526
void r4k_helper_tlbwi (void)
1527
{
1528
    int idx;
1529

    
1530
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
1531

    
1532
    /* Discard cached TLB entries.  We could avoid doing this if the
1533
       tlbwi is just upgrading access permissions on the current entry;
1534
       that might be a further win.  */
1535
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
1536

    
1537
    r4k_invalidate_tlb(env, idx, 0);
1538
    r4k_fill_tlb(idx);
1539
}
1540

    
1541
void r4k_helper_tlbwr (void)
1542
{
1543
    int r = cpu_mips_get_random(env);
1544

    
1545
    r4k_invalidate_tlb(env, r, 1);
1546
    r4k_fill_tlb(r);
1547
}
1548

    
1549
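/* TLBP: probe the TLB for an entry matching EntryHi.  On a hit, CP0_Index
   is set to the matching index; on a miss, the probe failure bit (bit 31)
   is set after discarding any stale shadow entries that would have matched.  */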
void r4k_helper_tlbp (void)
1550
{
1551
    r4k_tlb_t *tlb;
1552
    target_ulong mask;
1553
    target_ulong tag;
1554
    target_ulong VPN;
1555
    uint8_t ASID;
1556
    int i;
1557

    
1558
    ASID = env->CP0_EntryHi & 0xFF;
1559
    for (i = 0; i < env->tlb->nb_tlb; i++) {
1560
        tlb = &env->tlb->mmu.r4k.tlb[i];
1561
        /* 1k pages are not supported. */
1562
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1563
        tag = env->CP0_EntryHi & ~mask;
1564
        VPN = tlb->VPN & ~mask;
1565
        /* Check ASID, virtual page number & size */
1566
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1567
            /* TLB match */
1568
            env->CP0_Index = i;
1569
            break;
1570
        }
1571
    }
1572
    if (i == env->tlb->nb_tlb) {
1573
        /* No match.  Discard any shadow entries, if any of them match.  */
1574
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
1575
            tlb = &env->tlb->mmu.r4k.tlb[i];
1576
            /* 1k pages are not supported. */
1577
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1578
            tag = env->CP0_EntryHi & ~mask;
1579
            VPN = tlb->VPN & ~mask;
1580
            /* Check ASID, virtual page number & size */
1581
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1582
                r4k_mips_tlb_flush_extra (env, i);
1583
                break;
1584
            }
1585
        }
1586

    
1587
        env->CP0_Index |= 0x80000000;
1588
    }
1589
}
1590

    
1591
void r4k_helper_tlbr (void)
1592
{
1593
    r4k_tlb_t *tlb;
1594
    uint8_t ASID;
1595
    int idx;
1596

    
1597
    ASID = env->CP0_EntryHi & 0xFF;
1598
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
1599
    tlb = &env->tlb->mmu.r4k.tlb[idx];
1600

    
1601
    /* If this will change the current ASID, flush qemu's TLB.  */
1602
    if (ASID != tlb->ASID)
1603
        cpu_mips_tlb_flush (env, 1);
1604

    
1605
    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
1606

    
1607
    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
1608
    env->CP0_PageMask = tlb->PageMask;
1609
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
1610
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
1611
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
1612
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
1613
}
1614

    
1615
void helper_tlbwi(void)
1616
{
1617
    env->tlb->helper_tlbwi();
1618
}
1619

    
1620
void helper_tlbwr(void)
1621
{
1622
    env->tlb->helper_tlbwr();
1623
}
1624

    
1625
void helper_tlbp(void)
1626
{
1627
    env->tlb->helper_tlbp();
1628
}
1629

    
1630
void helper_tlbr(void)
1631
{
1632
    env->tlb->helper_tlbr();
1633
}
1634

    
1635
/* Specials */
1636
target_ulong helper_di (void)
1637
{
1638
    target_ulong t0 = env->CP0_Status;
1639

    
1640
    env->CP0_Status = t0 & ~(1 << CP0St_IE);
1641
    cpu_mips_update_irq(env);
1642

    
1643
    return t0;
1644
}
1645

    
1646
target_ulong helper_ei (void)
1647
{
1648
    target_ulong t0 = env->CP0_Status;
1649

    
1650
    env->CP0_Status = t0 | (1 << CP0St_IE);
1651
    cpu_mips_update_irq(env);
1652

    
1653
    return t0;
1654
}
1655

    
1656
static void debug_pre_eret (void)
1657
{
1658
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1659
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1660
                env->active_tc.PC, env->CP0_EPC);
1661
        if (env->CP0_Status & (1 << CP0St_ERL))
1662
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1663
        if (env->hflags & MIPS_HFLAG_DM)
1664
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1665
        qemu_log("\n");
1666
    }
1667
}
1668

    
1669
static void debug_post_eret (void)
1670
{
1671
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1672
        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1673
                env->active_tc.PC, env->CP0_EPC);
1674
        if (env->CP0_Status & (1 << CP0St_ERL))
1675
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1676
        if (env->hflags & MIPS_HFLAG_DM)
1677
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1678
        switch (env->hflags & MIPS_HFLAG_KSU) {
1679
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1680
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1681
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
1682
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1683
        }
1684
    }
1685
}
1686

    
1687
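/* ERET: return from exception or error level.  With ERL set, return via
   ErrorEPC and clear ERL; otherwise return via EPC and clear EXL.  Resetting
   lladdr breaks any outstanding LL/SC reservation.  */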
void helper_eret (void)
1688
{
1689
    debug_pre_eret();
1690
    if (env->CP0_Status & (1 << CP0St_ERL)) {
1691
        env->active_tc.PC = env->CP0_ErrorEPC;
1692
        env->CP0_Status &= ~(1 << CP0St_ERL);
1693
    } else {
1694
        env->active_tc.PC = env->CP0_EPC;
1695
        env->CP0_Status &= ~(1 << CP0St_EXL);
1696
    }
1697
    compute_hflags(env);
1698
    debug_post_eret();
1699
    env->lladdr = 1;
1700
}
1701

    
1702
void helper_deret (void)
1703
{
1704
    debug_pre_eret();
1705
    env->active_tc.PC = env->CP0_DEPC;
1706
    /* Leave debug mode.  */
    env->hflags &= ~MIPS_HFLAG_DM;
1707
    compute_hflags(env);
1708
    debug_post_eret();
1709
    env->lladdr = 1;
1710
}
1711
#endif /* !CONFIG_USER_ONLY */

target_ulong helper_rdhwr_cpunum(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 0)))
        return env->CP0_EBase & 0x3ff;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

target_ulong helper_rdhwr_synci_step(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 1)))
        return env->SYNCI_Step;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

target_ulong helper_rdhwr_cc(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 2)))
        return env->CP0_Count;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

target_ulong helper_rdhwr_ccres(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 3)))
        return env->CCRes;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}
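
/* RDHWR exposes hardware registers 0..3 (CPUNum, SYNCI_Step, CC, CCRes) to
   unprivileged code only when the matching CP0_HWREna bit is set; otherwise,
   outside kernel/CP0-capable contexts, the access traps as Reserved
   Instruction, which is what the EXCP_RI path above models. */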

void helper_pmon (int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0)
            env->active_tc.gpr[2] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}

void helper_wait (void)
{
    env->halted = 1;
    helper_raise_exception(EXCP_HLT);
}

#if !defined(CONFIG_USER_ONLY)

static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);

#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (retaddr);
    helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}
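
/* Each inclusion of softmmu_template.h instantiates the slow-path load/store
   helpers for one access size (SHIFT 0..3 = 1, 2, 4 and 8 bytes) with the
   _mmu suffix; ALIGNED_ONLY makes those helpers call do_unaligned_access()
   above instead of silently accepting misaligned accesses. */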

void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int unused, int size)
{
    if (is_exec)
        helper_raise_exception(EXCP_IBE);
    else
        helper_raise_exception(EXCP_DBE);
}
#endif /* !CONFIG_USER_ONLY */

/* Complex FPU operations which may need stack space. */

#define FLOAT_ONE32 make_float32(0x3f8 << 20)
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
#define FLOAT_TWO32 make_float32(1 << 30)
#define FLOAT_TWO64 make_float64(1ULL << 62)
#define FLOAT_QNAN32 0x7fbfffff
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
#define FLOAT_SNAN32 0x7fffffff
#define FLOAT_SNAN64 0x7fffffffffffffffULL

/* convert MIPS rounding mode in FCR31 to IEEE library */
static unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};

#define RESTORE_ROUNDING_MODE \
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)

#define RESTORE_FLUSH_MODE \
    set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
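
/* FCR31 (FCSR) keeps the rounding mode in its two low bits (0 = nearest-even,
   1 = toward zero, 2 = toward +inf, 3 = toward -inf), which is exactly the
   order of ieee_rm[] above, and the flush-to-zero (FS) control in bit 24. */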

target_ulong helper_cfc1 (uint32_t reg)
{
    target_ulong arg1;

    switch (reg) {
    case 0:
        arg1 = (int32_t)env->active_fpu.fcr0;
        break;
    case 25:
        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
        break;
    case 26:
        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
        break;
    case 28:
        arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
        break;
    default:
        arg1 = (int32_t)env->active_fpu.fcr31;
        break;
    }

    return arg1;
}

void helper_ctc1 (target_ulong arg1, uint32_t reg)
{
    switch(reg) {
    case 25:
        if (arg1 & 0xffffff00)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
                     ((arg1 & 0x1) << 23);
        break;
    case 26:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
        break;
    case 28:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
                     ((arg1 & 0x4) << 22);
        break;
    case 31:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = arg1;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    /* set flush-to-zero mode */
    RESTORE_FLUSH_MODE;
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
        helper_raise_exception(EXCP_FPE);
}
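
/* The CFC1/CTC1 register numbers above are the usual FPU control aliases:
   0 is FIR (read-only implementation register), 25 is FCCR (condition
   codes), 26 is FEXR (cause/flags), 28 is FENR (enables/RM/FS) and 31 is the
   full FCSR; other numbers fall back to reading FCSR and ignoring writes. */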

static inline char ieee_ex_to_mips(char xcpt)
{
    return (xcpt & float_flag_inexact) >> 5 |
           (xcpt & float_flag_underflow) >> 3 |
           (xcpt & float_flag_overflow) >> 1 |
           (xcpt & float_flag_divbyzero) << 1 |
           (xcpt & float_flag_invalid) << 4;
}

static inline char mips_ex_to_ieee(char xcpt)
{
    return (xcpt & FP_INEXACT) << 5 |
           (xcpt & FP_UNDERFLOW) << 3 |
           (xcpt & FP_OVERFLOW) << 1 |
           (xcpt & FP_DIV0) >> 1 |
           (xcpt & FP_INVALID) >> 4;
}

static inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));

    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
        helper_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
}
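
/* The shifts above translate between the softfloat exception bits and the
   MIPS FCSR ordering (Inexact, Underflow, Overflow, Divide-by-zero, Invalid
   from the least significant bit up).  update_fcr31() then latches the new
   flags into the Cause field and either raises a Floating Point exception
   when the matching Enable bit is set or accumulates them into the sticky
   Flags field, mirroring the architectural behaviour. */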

/* Float support.
   Single precision routines have a "s" suffix, double precision a
   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
   paired single lower "pl", paired single upper "pu".  */

/* unary operations, modifying fp status  */
uint64_t helper_float_sqrt_d(uint64_t fdt0)
{
    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
}

uint32_t helper_float_sqrt_s(uint32_t fst0)
{
    return float32_sqrt(fst0, &env->active_fpu.fp_status);
}

uint64_t helper_float_cvtd_s(uint32_t fst0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t helper_float_cvtd_w(uint32_t wt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t helper_float_cvtd_l(uint64_t dt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t helper_float_cvtl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_cvtl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}
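
/* The float-to-integer conversions here and below all follow one pattern:
   clear the accumulated softfloat flags, convert, fold the new flags into
   FCR31, and substitute the MIPS default result (0x7fffffff or
   0x7fffffffffffffff, spelled FLOAT_SNAN32/64 above) whenever the
   conversion overflowed or was invalid. */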

uint64_t helper_float_cvtps_pw(uint64_t dt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
{
    uint32_t wt2;
    uint32_t wth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        wt2 = FLOAT_SNAN32;
        wth2 = FLOAT_SNAN32;
    }
    return ((uint64_t)wth2 << 32) | wt2;
}

uint32_t helper_float_cvts_d(uint64_t fdt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t helper_float_cvts_w(uint32_t wt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t helper_float_cvts_l(uint64_t dt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t helper_float_cvts_pl(uint32_t wt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wt0;
    update_fcr31();
    return wt2;
}

uint32_t helper_float_cvts_pu(uint32_t wth0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wth0;
    update_fcr31();
    return wt2;
}

uint32_t helper_float_cvtw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_cvtw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t helper_float_roundl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_roundl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_roundw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_roundw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t helper_float_truncl_d(uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_truncl_s(uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_truncw_d(uint64_t fdt0)
{
    uint32_t wt2;

    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_truncw_s(uint32_t fst0)
{
    uint32_t wt2;

    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t helper_float_ceill_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_ceill_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_ceilw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_ceilw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t helper_float_floorl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_floorl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_floorw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_floorw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
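
/* ROUND/CEIL/FLOOR differ from CVT only in that they force a specific
   rounding mode for the conversion and then put the FCSR-selected mode back
   with RESTORE_ROUNDING_MODE; TRUNC uses the dedicated round-to-zero
   softfloat entry points instead of switching modes. */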

/* unary operations, not modifying fp status  */
#define FLOAT_UNOP(name)                                       \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)                \
{                                                              \
    return float64_ ## name(fdt0);                             \
}                                                              \
uint32_t helper_float_ ## name ## _s(uint32_t fst0)                \
{                                                              \
    return float32_ ## name(fst0);                             \
}                                                              \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)               \
{                                                              \
    uint32_t wt0;                                              \
    uint32_t wth0;                                             \
                                                               \
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                 \
    wth0 = float32_ ## name(fdt0 >> 32);                       \
    return ((uint64_t)wth0 << 32) | wt0;                       \
}
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP
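
/* ABS and CHS only clear or flip the sign bit of the encoding, so they never
   signal IEEE exceptions and deliberately skip the fp_status / update_fcr31()
   bookkeeping used by the arithmetic helpers. */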

/* MIPS specific unary operations */
uint64_t helper_float_recip_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_recip_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t helper_float_rsqrt_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_rsqrt_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t helper_float_recip1_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_recip1_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t helper_float_recip1_ps(uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_rsqrt1_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

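/* RECIP1/RSQRT1 (and the RECIP2/RSQRT2 step operations further down) come
   from the MIPS-3D ASE, where they are allowed to be reduced-precision
   estimates; this implementation simply computes them in full precision
   with the ordinary softfloat divide and square root. */
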
#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)

/* binary operations */
#define FLOAT_BINOP(name)                                          \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)     \
{                                                                  \
    uint64_t dt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
        dt2 = FLOAT_QNAN64;                                        \
    return dt2;                                                    \
}                                                                  \
                                                                   \
uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)     \
{                                                                  \
    uint32_t wt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
        wt2 = FLOAT_QNAN32;                                        \
    return wt2;                                                    \
}                                                                  \
                                                                   \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)    \
{                                                                  \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
    uint32_t fsth0 = fdt0 >> 32;                                   \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
    uint32_t fsth1 = fdt1 >> 32;                                   \
    uint32_t wt2;                                                  \
    uint32_t wth2;                                                 \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);  \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {              \
        wt2 = FLOAT_QNAN32;                                        \
        wth2 = FLOAT_QNAN32;                                       \
    }                                                              \
    return ((uint64_t)wth2 << 32) | wt2;                           \
}

FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP

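/* For the ADD/SUB/MUL/DIV helpers generated above, the default quiet NaN
   (FLOAT_QNAN32/64) is returned whenever the operation raised the Invalid
   flag and no Floating Point exception was taken. */
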
/* ternary operations */
#define FLOAT_TERNOP(name1, name2)                                        \
uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
}                                                                         \
                                                                          \
uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
}                                                                         \
                                                                          \
uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
                                            uint64_t fdt2)                \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}

FLOAT_TERNOP(mul, add)
FLOAT_TERNOP(mul, sub)
#undef FLOAT_TERNOP

/* negated ternary operations */
#define FLOAT_NTERNOP(name1, name2)                                       \
uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
    return float64_chs(fdt2);                                             \
}                                                                         \
                                                                          \
uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    return float32_chs(fst2);                                             \
}                                                                         \
                                                                          \
uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
                                           uint64_t fdt2)                 \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
    fst2 = float32_chs(fst2);                                             \
    fsth2 = float32_chs(fsth2);                                           \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}

FLOAT_NTERNOP(mul, add)
FLOAT_NTERNOP(mul, sub)
#undef FLOAT_NTERNOP
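
/* MADD/MSUB and their negated forms are modelled here as a multiply followed
   by a separately rounded add/sub rather than a single fused multiply-add,
   i.e. the intermediate product is rounded before the addition. */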

/* MIPS specific binary operations */
uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
    update_fcr31();
    return fst2;
}

uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
    update_fcr31();
    return fst2;
}

uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
    uint32_t fsth1 = fdt1 >> 32;
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
    fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
    uint32_t fsth1 = fdt1 >> 32;
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
    fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}
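
/* ADDR.PS and MULR.PS are the MIPS-3D "reduction" forms: each half of the
   result combines the upper and lower singles of one source operand (fs for
   the low half, ft for the high half) instead of pairing fs with ft
   element-wise. */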

/* compare operations */
#define FOP_COND_D(op, cond)                                   \
void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                              \
    int c = cond;                                              \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                              \
    int c;                                                     \
    fdt0 = float64_abs(fdt0);                                  \
    fdt1 = float64_abs(fdt1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

static int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
{
    if (float64_is_signaling_nan(a) ||
        float64_is_signaling_nan(b) ||
        (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float64_is_nan(a) || float64_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(f,   (float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(un,  float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(eq,  !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ueq, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(olt, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ult, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ole, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ule, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(sf,  (float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(ngle,float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(seq, !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngl, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(lt,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(nge, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(le,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngt, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))

#define FOP_COND_S(op, cond)                                   \
void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
{                                                              \
    int c = cond;                                              \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
{                                                              \
    int c;                                                     \
    fst0 = float32_abs(fst0);                                  \
    fst1 = float32_abs(fst1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

    
2794
static flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
2795
{
2796
    if (float32_is_signaling_nan(a) ||
2797
        float32_is_signaling_nan(b) ||
2798
        (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
2799
        float_raise(float_flag_invalid, status);
2800
        return 1;
2801
    } else if (float32_is_nan(a) || float32_is_nan(b)) {
2802
        return 1;
2803
    } else {
2804
        return 0;
2805
    }
2806
}
2807

    
2808
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))

#define FOP_COND_PS(op, condl, condh)                           \
void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                               \
    /* Plain compare: do not strip the sign bit (cmpabs below does). */ \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                          \
    uint32_t fsth0 = fdt0 >> 32;                                \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                          \
    uint32_t fsth1 = fdt1 >> 32;                                \
    int cl = condl;                                             \
    int ch = condh;                                             \
                                                                \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}                                                               \
void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                               \
    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
    int cl = condl;                                             \
    int ch = condh;                                             \
                                                                \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))