target-mips/op_helper.c @ df2dbb4a

/*
 *  MIPS emulation helpers for qemu.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include "exec.h"

#include "host-utils.h"

#include "helper.h"

#ifndef CONFIG_USER_ONLY
static inline void cpu_mips_tlb_flush (CPUState *env, int flush_global);
#endif

/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, int error_code)
{
#if 1
    if (exception < 0x100)
        qemu_log("%s: %d %d\n", __func__, exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}

49
#if !defined(CONFIG_USER_ONLY)
50
static void do_restore_state (void *pc_ptr)
51
{
52
    TranslationBlock *tb;
53
    unsigned long pc = (unsigned long) pc_ptr;
54
    
55
    tb = tb_find_pc (pc);
56
    if (tb) {
57
        cpu_restore_state (tb, env, pc, NULL);
58
    }
59
}
60
#endif
61

    
62
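/* The HELPER_LD/HELPER_ST macros below generate small inline wrappers
   (do_lbu, do_lw, do_sb, ...) around the memory access primitives.
   For user-mode emulation the *_raw accessors are used directly; for
   system emulation the mem_idx argument (0 kernel, 1 supervisor,
   2/default user) selects the accessor matching the privilege level. */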
#if defined(CONFIG_USER_ONLY)
63
#define HELPER_LD(name, insn, type)                                     \
64
static inline type do_##name(target_ulong addr, int mem_idx)            \
65
{                                                                       \
66
    return (type) insn##_raw(addr);                                     \
67
}
68
#else
69
#define HELPER_LD(name, insn, type)                                     \
70
static inline type do_##name(target_ulong addr, int mem_idx)            \
71
{                                                                       \
72
    switch (mem_idx)                                                    \
73
    {                                                                   \
74
    case 0: return (type) insn##_kernel(addr); break;                   \
75
    case 1: return (type) insn##_super(addr); break;                    \
76
    default:                                                            \
77
    case 2: return (type) insn##_user(addr); break;                     \
78
    }                                                                   \
79
}
80
#endif
81
HELPER_LD(lbu, ldub, uint8_t)
82
HELPER_LD(lw, ldl, int32_t)
83
#ifdef TARGET_MIPS64
84
HELPER_LD(ld, ldq, int64_t)
85
#endif
86
#undef HELPER_LD
87

    
88
#if defined(CONFIG_USER_ONLY)
89
#define HELPER_ST(name, insn, type)                                     \
90
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
91
{                                                                       \
92
    insn##_raw(addr, val);                                              \
93
}
94
#else
95
#define HELPER_ST(name, insn, type)                                     \
96
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
97
{                                                                       \
98
    switch (mem_idx)                                                    \
99
    {                                                                   \
100
    case 0: insn##_kernel(addr, val); break;                            \
101
    case 1: insn##_super(addr, val); break;                             \
102
    default:                                                            \
103
    case 2: insn##_user(addr, val); break;                              \
104
    }                                                                   \
105
}
106
#endif
107
HELPER_ST(sb, stb, uint8_t)
108
HELPER_ST(sw, stl, uint32_t)
109
#ifdef TARGET_MIPS64
110
HELPER_ST(sd, stq, uint64_t)
111
#endif
112
#undef HELPER_ST
113

    
114
target_ulong helper_clo (target_ulong arg1)
115
{
116
    return clo32(arg1);
117
}
118

    
119
target_ulong helper_clz (target_ulong arg1)
120
{
121
    return clz32(arg1);
122
}
123

    
124
#if defined(TARGET_MIPS64)
125
target_ulong helper_dclo (target_ulong arg1)
126
{
127
    return clo64(arg1);
128
}
129

    
130
target_ulong helper_dclz (target_ulong arg1)
131
{
132
    return clz64(arg1);
133
}
134
#endif /* TARGET_MIPS64 */
135

    
136
/* 64 bits arithmetic for 32 bits hosts */
137
static inline uint64_t get_HILO (void)
138
{
139
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
140
}
141

    
142
static inline void set_HILO (uint64_t HILO)
143
{
144
    env->active_tc.LO[0] = (int32_t)HILO;
145
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
146
}
147

    
148
static inline void set_HIT0_LO (target_ulong arg1, uint64_t HILO)
149
{
150
    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
151
    arg1 = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
152
}
153

    
154
static inline void set_HI_LOT0 (target_ulong arg1, uint64_t HILO)
155
{
156
    arg1 = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
157
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
158
}
159

    
160
/* Multiplication variants of the vr54xx. */
161
target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
162
{
163
    set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
164

    
165
    return arg1;
166
}
167

    
168
target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
169
{
170
    set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
171

    
172
    return arg1;
173
}
174

    
175
target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
176
{
177
    set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
178

    
179
    return arg1;
180
}
181

    
182
target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
183
{
184
    set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
185

    
186
    return arg1;
187
}
188

    
189
target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
190
{
191
    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
192

    
193
    return arg1;
194
}
195

    
196
target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
197
{
198
    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
199

    
200
    return arg1;
201
}
202

    
203
target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
204
{
205
    set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
206

    
207
    return arg1;
208
}
209

    
210
target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
211
{
212
    set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
213

    
214
    return arg1;
215
}
216

    
217
target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
218
{
219
    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
220

    
221
    return arg1;
222
}
223

    
224
target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
225
{
226
    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
227

    
228
    return arg1;
229
}
230

    
231
target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
232
{
233
    set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
234

    
235
    return arg1;
236
}
237

    
238
target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
239
{
240
    set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
241

    
242
    return arg1;
243
}
244

    
245
target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
246
{
247
    set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
248

    
249
    return arg1;
250
}
251

    
252
target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
253
{
254
    set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
255

    
256
    return arg1;
257
}
258

    
259
#ifdef TARGET_MIPS64
260
void helper_dmult (target_ulong arg1, target_ulong arg2)
261
{
262
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
263
}
264

    
265
void helper_dmultu (target_ulong arg1, target_ulong arg2)
266
{
267
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
268
}
269
#endif
270

    
271
#ifndef CONFIG_USER_ONLY
272

    
273
static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
274
{
275
    target_phys_addr_t lladdr;
276

    
277
    lladdr = cpu_mips_translate_address(env, address, rw);
278

    
279
    if (lladdr == -1LL) {
280
        cpu_loop_exit();
281
    } else {
282
        return lladdr;
283
    }
284
}
285

    
286
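/* Linked-load / store-conditional emulation.  LL records the translated
   address in env->lladdr and the loaded value in env->llval; SC succeeds
   only if the store address translates to the same lladdr and the memory
   word still holds llval.  Note this is an approximation: a racing store
   that writes back the same value would not be detected. */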
#define HELPER_LD_ATOMIC(name, insn)                                          \
287
target_ulong helper_##name(target_ulong arg, int mem_idx)                     \
288
{                                                                             \
289
    env->lladdr = do_translate_address(arg, 0);                               \
290
    env->llval = do_##insn(arg, mem_idx);                                     \
291
    return env->llval;                                                        \
292
}
293
HELPER_LD_ATOMIC(ll, lw)
294
#ifdef TARGET_MIPS64
295
HELPER_LD_ATOMIC(lld, ld)
296
#endif
297
#undef HELPER_LD_ATOMIC
298

    
299
#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
300
target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
301
{                                                                             \
302
    target_long tmp;                                                          \
303
                                                                              \
304
    if (arg2 & almask) {                                                      \
305
        env->CP0_BadVAddr = arg2;                                             \
306
        helper_raise_exception(EXCP_AdES);                                    \
307
    }                                                                         \
308
    if (do_translate_address(arg2, 1) == env->lladdr) {                       \
309
        tmp = do_##ld_insn(arg2, mem_idx);                                    \
310
        if (tmp == env->llval) {                                              \
311
            do_##st_insn(arg2, arg1, mem_idx);                                \
312
            return 1;                                                         \
313
        }                                                                     \
314
    }                                                                         \
315
    return 0;                                                                 \
316
}
317
HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
318
#ifdef TARGET_MIPS64
319
HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
320
#endif
321
#undef HELPER_ST_ATOMIC
322
#endif
323

    
324
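/* GET_LMASK yields the offset of the addressed byte within its word
   (0..3), normalised so that the same comparisons work for both
   endiannesses, and GET_OFFSET steps towards the remaining bytes of
   the word.  They are used by the unaligned-access helpers lwl/lwr
   and swl/swr below. */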
#ifdef TARGET_WORDS_BIGENDIAN
325
#define GET_LMASK(v) ((v) & 3)
326
#define GET_OFFSET(addr, offset) (addr + (offset))
327
#else
328
#define GET_LMASK(v) (((v) & 3) ^ 3)
329
#define GET_OFFSET(addr, offset) (addr - (offset))
330
#endif
331

    
332
target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
333
{
334
    target_ulong tmp;
335

    
336
    tmp = do_lbu(arg2, mem_idx);
337
    arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
338

    
339
    if (GET_LMASK(arg2) <= 2) {
340
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
341
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
342
    }
343

    
344
    if (GET_LMASK(arg2) <= 1) {
345
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
346
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
347
    }
348

    
349
    if (GET_LMASK(arg2) == 0) {
350
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
351
        arg1 = (arg1 & 0xFFFFFF00) | tmp;
352
    }
353
    return (int32_t)arg1;
354
}
355

    
356
target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
357
{
358
    target_ulong tmp;
359

    
360
    tmp = do_lbu(arg2, mem_idx);
361
    arg1 = (arg1 & 0xFFFFFF00) | tmp;
362

    
363
    if (GET_LMASK(arg2) >= 1) {
364
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
365
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
366
    }
367

    
368
    if (GET_LMASK(arg2) >= 2) {
369
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
370
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
371
    }
372

    
373
    if (GET_LMASK(arg2) == 3) {
374
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
375
        arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
376
    }
377
    return (int32_t)arg1;
378
}
379

    
380
void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
381
{
382
    do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);
383

    
384
    if (GET_LMASK(arg2) <= 2)
385
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
386

    
387
    if (GET_LMASK(arg2) <= 1)
388
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
389

    
390
    if (GET_LMASK(arg2) == 0)
391
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
392
}
393

    
394
void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
395
{
396
    do_sb(arg2, (uint8_t)arg1, mem_idx);
397

    
398
    if (GET_LMASK(arg2) >= 1)
399
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
400

    
401
    if (GET_LMASK(arg2) >= 2)
402
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
403

    
404
    if (GET_LMASK(arg2) == 3)
405
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
406
}
407

    
408
#if defined(TARGET_MIPS64)
409
/* "half" load and stores.  We must do the memory access inline,
410
   or fault handling won't work.  */
411

    
412
#ifdef TARGET_WORDS_BIGENDIAN
413
#define GET_LMASK64(v) ((v) & 7)
414
#else
415
#define GET_LMASK64(v) (((v) & 7) ^ 7)
416
#endif
417

    
418
target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
419
{
420
    uint64_t tmp;
421

    
422
    tmp = do_lbu(arg2, mem_idx);
423
    arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
424

    
425
    if (GET_LMASK64(arg2) <= 6) {
426
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
427
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
428
    }
429

    
430
    if (GET_LMASK64(arg2) <= 5) {
431
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
432
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
433
    }
434

    
435
    if (GET_LMASK64(arg2) <= 4) {
436
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
437
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
438
    }
439

    
440
    if (GET_LMASK64(arg2) <= 3) {
441
        tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
442
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
443
    }
444

    
445
    if (GET_LMASK64(arg2) <= 2) {
446
        tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
447
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
448
    }
449

    
450
    if (GET_LMASK64(arg2) <= 1) {
451
        tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
452
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
453
    }
454

    
455
    if (GET_LMASK64(arg2) == 0) {
456
        tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
457
        arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
458
    }
459

    
460
    return arg1;
461
}
462

    
463
target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
464
{
465
    uint64_t tmp;
466

    
467
    tmp = do_lbu(arg2, mem_idx);
468
    arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
469

    
470
    if (GET_LMASK64(arg2) >= 1) {
471
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
472
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp  << 8);
473
    }
474

    
475
    if (GET_LMASK64(arg2) >= 2) {
476
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
477
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
478
    }
479

    
480
    if (GET_LMASK64(arg2) >= 3) {
481
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
482
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
483
    }
484

    
485
    if (GET_LMASK64(arg2) >= 4) {
486
        tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
487
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
488
    }
489

    
490
    if (GET_LMASK64(arg2) >= 5) {
491
        tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
492
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
493
    }
494

    
495
    if (GET_LMASK64(arg2) >= 6) {
496
        tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
497
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
498
    }
499

    
500
    if (GET_LMASK64(arg2) == 7) {
501
        tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
502
        arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
503
    }
504

    
505
    return arg1;
506
}
507

    
508
void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
509
{
510
    do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);
511

    
512
    if (GET_LMASK64(arg2) <= 6)
513
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
514

    
515
    if (GET_LMASK64(arg2) <= 5)
516
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
517

    
518
    if (GET_LMASK64(arg2) <= 4)
519
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
520

    
521
    if (GET_LMASK64(arg2) <= 3)
522
        do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
523

    
524
    if (GET_LMASK64(arg2) <= 2)
525
        do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
526

    
527
    if (GET_LMASK64(arg2) <= 1)
528
        do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
529

    
530
    if (GET_LMASK64(arg2) <= 0)
531
        do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
532
}
533

    
534
void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
535
{
536
    do_sb(arg2, (uint8_t)arg1, mem_idx);
537

    
538
    if (GET_LMASK64(arg2) >= 1)
539
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
540

    
541
    if (GET_LMASK64(arg2) >= 2)
542
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
543

    
544
    if (GET_LMASK64(arg2) >= 3)
545
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
546

    
547
    if (GET_LMASK64(arg2) >= 4)
548
        do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);
549

    
550
    if (GET_LMASK64(arg2) >= 5)
551
        do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);
552

    
553
    if (GET_LMASK64(arg2) >= 6)
554
        do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);
555

    
556
    if (GET_LMASK64(arg2) == 7)
557
        do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
558
}
559
#endif /* TARGET_MIPS64 */
560

    
561
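/* Register list used by the LWM/SWM (and LDM/SDM) helpers below:
   s0-s7 ($16-$23) and fp ($30), with ra ($31) handled separately
   when bit 4 of the encoded reglist is set. */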
static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
562

    
563
void helper_lwm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
564
{
565
    target_ulong base_reglist = reglist & 0xf;
566
    target_ulong do_r31 = reglist & 0x10;
567
#ifdef CONFIG_USER_ONLY
568
#undef ldfun
569
#define ldfun ldl_raw
570
#else
571
    uint32_t (*ldfun)(target_ulong);
572

    
573
    switch (mem_idx)
574
    {
575
    case 0: ldfun = ldl_kernel; break;
576
    case 1: ldfun = ldl_super; break;
577
    default:
578
    case 2: ldfun = ldl_user; break;
579
    }
580
#endif
581

    
582
    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
583
        target_ulong i;
584

    
585
        for (i = 0; i < base_reglist; i++) {
586
            env->active_tc.gpr[multiple_regs[i]] = (target_long) ldfun(addr);
587
            addr += 4;
588
        }
589
    }
590

    
591
    if (do_r31) {
592
        env->active_tc.gpr[31] = (target_long) ldfun(addr);
593
    }
594
}
595

    
596
void helper_swm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
597
{
598
    target_ulong base_reglist = reglist & 0xf;
599
    target_ulong do_r31 = reglist & 0x10;
600
#ifdef CONFIG_USER_ONLY
601
#undef stfun
602
#define stfun stl_raw
603
#else
604
    void (*stfun)(target_ulong, uint32_t);
605

    
606
    switch (mem_idx)
607
    {
608
    case 0: stfun = stl_kernel; break;
609
    case 1: stfun = stl_super; break;
610
     default:
611
    case 2: stfun = stl_user; break;
612
    }
613
#endif
614

    
615
    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
616
        target_ulong i;
617

    
618
        for (i = 0; i < base_reglist; i++) {
619
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
620
            addr += 4;
621
        }
622
    }
623

    
624
    if (do_r31) {
625
        stfun(addr, env->active_tc.gpr[31]);
626
    }
627
}
628

    
629
#if defined(TARGET_MIPS64)
630
void helper_ldm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
631
{
632
    target_ulong base_reglist = reglist & 0xf;
633
    target_ulong do_r31 = reglist & 0x10;
634
#ifdef CONFIG_USER_ONLY
635
#undef ldfun
636
#define ldfun ldq_raw
637
#else
638
    uint64_t (*ldfun)(target_ulong);
639

    
640
    switch (mem_idx)
641
    {
642
    case 0: ldfun = ldq_kernel; break;
643
    case 1: ldfun = ldq_super; break;
644
    default:
645
    case 2: ldfun = ldq_user; break;
646
    }
647
#endif
648

    
649
    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
650
        target_ulong i;
651

    
652
        for (i = 0; i < base_reglist; i++) {
653
            env->active_tc.gpr[multiple_regs[i]] = ldfun(addr);
654
            addr += 8;
655
        }
656
    }
657

    
658
    if (do_r31) {
659
        env->active_tc.gpr[31] = ldfun(addr);
660
    }
661
}
662

    
663
void helper_sdm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
664
{
665
    target_ulong base_reglist = reglist & 0xf;
666
    target_ulong do_r31 = reglist & 0x10;
667
#ifdef CONFIG_USER_ONLY
668
#undef stfun
669
#define stfun stq_raw
670
#else
671
    void (*stfun)(target_ulong, uint64_t);
672

    
673
    switch (mem_idx)
674
    {
675
    case 0: stfun = stq_kernel; break;
676
    case 1: stfun = stq_super; break;
677
     default:
678
    case 2: stfun = stq_user; break;
679
    }
680
#endif
681

    
682
    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
683
        target_ulong i;
684

    
685
        for (i = 0; i < base_reglist; i++) {
686
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
687
            addr += 8;
688
        }
689
    }
690

    
691
    if (do_r31) {
692
        stfun(addr, env->active_tc.gpr[31]);
693
    }
694
}
695
#endif
696

    
697
#ifndef CONFIG_USER_ONLY
698
/* CP0 helpers */
699
target_ulong helper_mfc0_mvpcontrol (void)
700
{
701
    return env->mvp->CP0_MVPControl;
702
}
703

    
704
target_ulong helper_mfc0_mvpconf0 (void)
705
{
706
    return env->mvp->CP0_MVPConf0;
707
}
708

    
709
target_ulong helper_mfc0_mvpconf1 (void)
710
{
711
    return env->mvp->CP0_MVPConf1;
712
}
713

    
714
target_ulong helper_mfc0_random (void)
715
{
716
    return (int32_t)cpu_mips_get_random(env);
717
}
718

    
719
target_ulong helper_mfc0_tcstatus (void)
720
{
721
    return env->active_tc.CP0_TCStatus;
722
}
723

    
724
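/* MT ASE cross-TC moves (mftc0/mttc0 and friends): the target thread
   context is named by the TargTC field of CP0_VPEControl.  When it is
   the currently running TC the live copy in env->active_tc is used,
   otherwise the saved state in env->tcs[] is accessed. */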
target_ulong helper_mftc0_tcstatus(void)
725
{
726
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
727

    
728
    if (other_tc == env->current_tc)
729
        return env->active_tc.CP0_TCStatus;
730
    else
731
        return env->tcs[other_tc].CP0_TCStatus;
732
}
733

    
734
target_ulong helper_mfc0_tcbind (void)
735
{
736
    return env->active_tc.CP0_TCBind;
737
}
738

    
739
target_ulong helper_mftc0_tcbind(void)
740
{
741
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
742

    
743
    if (other_tc == env->current_tc)
744
        return env->active_tc.CP0_TCBind;
745
    else
746
        return env->tcs[other_tc].CP0_TCBind;
747
}
748

    
749
target_ulong helper_mfc0_tcrestart (void)
750
{
751
    return env->active_tc.PC;
752
}
753

    
754
target_ulong helper_mftc0_tcrestart(void)
755
{
756
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
757

    
758
    if (other_tc == env->current_tc)
759
        return env->active_tc.PC;
760
    else
761
        return env->tcs[other_tc].PC;
762
}
763

    
764
target_ulong helper_mfc0_tchalt (void)
765
{
766
    return env->active_tc.CP0_TCHalt;
767
}
768

    
769
target_ulong helper_mftc0_tchalt(void)
770
{
771
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
772

    
773
    if (other_tc == env->current_tc)
774
        return env->active_tc.CP0_TCHalt;
775
    else
776
        return env->tcs[other_tc].CP0_TCHalt;
777
}
778

    
779
target_ulong helper_mfc0_tccontext (void)
780
{
781
    return env->active_tc.CP0_TCContext;
782
}
783

    
784
target_ulong helper_mftc0_tccontext(void)
785
{
786
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
787

    
788
    if (other_tc == env->current_tc)
789
        return env->active_tc.CP0_TCContext;
790
    else
791
        return env->tcs[other_tc].CP0_TCContext;
792
}
793

    
794
target_ulong helper_mfc0_tcschedule (void)
795
{
796
    return env->active_tc.CP0_TCSchedule;
797
}
798

    
799
target_ulong helper_mftc0_tcschedule(void)
800
{
801
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
802

    
803
    if (other_tc == env->current_tc)
804
        return env->active_tc.CP0_TCSchedule;
805
    else
806
        return env->tcs[other_tc].CP0_TCSchedule;
807
}
808

    
809
target_ulong helper_mfc0_tcschefback (void)
810
{
811
    return env->active_tc.CP0_TCScheFBack;
812
}
813

    
814
target_ulong helper_mftc0_tcschefback(void)
815
{
816
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
817

    
818
    if (other_tc == env->current_tc)
819
        return env->active_tc.CP0_TCScheFBack;
820
    else
821
        return env->tcs[other_tc].CP0_TCScheFBack;
822
}
823

    
824
target_ulong helper_mfc0_count (void)
825
{
826
    return (int32_t)cpu_mips_get_count(env);
827
}
828

    
829
target_ulong helper_mftc0_entryhi(void)
830
{
831
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
832
    int32_t tcstatus;
833

    
834
    if (other_tc == env->current_tc)
835
        tcstatus = env->active_tc.CP0_TCStatus;
836
    else
837
        tcstatus = env->tcs[other_tc].CP0_TCStatus;
838

    
839
    return (env->CP0_EntryHi & ~0xff) | (tcstatus & 0xff);
840
}
841

    
842
target_ulong helper_mftc0_status(void)
843
{
844
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
845
    target_ulong t0;
846
    int32_t tcstatus;
847

    
848
    if (other_tc == env->current_tc)
849
        tcstatus = env->active_tc.CP0_TCStatus;
850
    else
851
        tcstatus = env->tcs[other_tc].CP0_TCStatus;
852

    
853
    t0 = env->CP0_Status & ~0xf1000018;
854
    t0 |= tcstatus & (0xf << CP0TCSt_TCU0);
855
    t0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
856
    t0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);
857

    
858
    return t0;
859
}
860

    
861
target_ulong helper_mfc0_lladdr (void)
862
{
863
    return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
864
}
865

    
866
target_ulong helper_mfc0_watchlo (uint32_t sel)
867
{
868
    return (int32_t)env->CP0_WatchLo[sel];
869
}
870

    
871
target_ulong helper_mfc0_watchhi (uint32_t sel)
872
{
873
    return env->CP0_WatchHi[sel];
874
}
875

    
876
target_ulong helper_mfc0_debug (void)
877
{
878
    target_ulong t0 = env->CP0_Debug;
879
    if (env->hflags & MIPS_HFLAG_DM)
880
        t0 |= 1 << CP0DB_DM;
881

    
882
    return t0;
883
}
884

    
885
target_ulong helper_mftc0_debug(void)
886
{
887
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
888
    int32_t tcstatus;
889

    
890
    if (other_tc == env->current_tc)
891
        tcstatus = env->active_tc.CP0_Debug_tcstatus;
892
    else
893
        tcstatus = env->tcs[other_tc].CP0_Debug_tcstatus;
894

    
895
    /* XXX: Might be wrong, check with EJTAG spec. */
896
    return (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
897
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
898
}
899

    
900
#if defined(TARGET_MIPS64)
901
target_ulong helper_dmfc0_tcrestart (void)
902
{
903
    return env->active_tc.PC;
904
}
905

    
906
target_ulong helper_dmfc0_tchalt (void)
907
{
908
    return env->active_tc.CP0_TCHalt;
909
}
910

    
911
target_ulong helper_dmfc0_tccontext (void)
912
{
913
    return env->active_tc.CP0_TCContext;
914
}
915

    
916
target_ulong helper_dmfc0_tcschedule (void)
917
{
918
    return env->active_tc.CP0_TCSchedule;
919
}
920

    
921
target_ulong helper_dmfc0_tcschefback (void)
922
{
923
    return env->active_tc.CP0_TCScheFBack;
924
}
925

    
926
target_ulong helper_dmfc0_lladdr (void)
927
{
928
    return env->lladdr >> env->CP0_LLAddr_shift;
929
}
930

    
931
target_ulong helper_dmfc0_watchlo (uint32_t sel)
932
{
933
    return env->CP0_WatchLo[sel];
934
}
935
#endif /* TARGET_MIPS64 */
936

    
937
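/* Writing CP0_Index: num is grown to a power of two large enough to
   cover every valid TLB index, and num - 1 is then used as the write
   mask for the index field.  The probe-failure flag in bit 31 is
   preserved. */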
void helper_mtc0_index (target_ulong arg1)
938
{
939
    int num = 1;
940
    unsigned int tmp = env->tlb->nb_tlb;
941

    
942
    do {
943
        tmp >>= 1;
944
        num <<= 1;
945
    } while (tmp);
946
    env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
947
}
948

    
949
void helper_mtc0_mvpcontrol (target_ulong arg1)
950
{
951
    uint32_t mask = 0;
952
    uint32_t newval;
953

    
954
    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
955
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
956
                (1 << CP0MVPCo_EVP);
957
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
958
        mask |= (1 << CP0MVPCo_STLB);
959
    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
960

    
961
    // TODO: Enable/disable shared TLB, enable/disable VPEs.
962

    
963
    env->mvp->CP0_MVPControl = newval;
964
}
965

    
966
void helper_mtc0_vpecontrol (target_ulong arg1)
967
{
968
    uint32_t mask;
969
    uint32_t newval;
970

    
971
    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
972
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
973
    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
974

    
975
    /* Yield scheduler intercept not implemented. */
976
    /* Gating storage scheduler intercept not implemented. */
977

    
978
    // TODO: Enable/disable TCs.
979

    
980
    env->CP0_VPEControl = newval;
981
}
982

    
983
void helper_mtc0_vpeconf0 (target_ulong arg1)
984
{
985
    uint32_t mask = 0;
986
    uint32_t newval;
987

    
988
    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
989
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
990
            mask |= (0xff << CP0VPEC0_XTC);
991
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
992
    }
993
    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
994

    
995
    // TODO: TC exclusive handling due to ERL/EXL.
996

    
997
    env->CP0_VPEConf0 = newval;
998
}
999

    
1000
void helper_mtc0_vpeconf1 (target_ulong arg1)
1001
{
1002
    uint32_t mask = 0;
1003
    uint32_t newval;
1004

    
1005
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1006
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
1007
                (0xff << CP0VPEC1_NCP1);
1008
    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
1009

    
1010
    /* UDI not implemented. */
1011
    /* CP2 not implemented. */
1012

    
1013
    // TODO: Handle FPU (CP1) binding.
1014

    
1015
    env->CP0_VPEConf1 = newval;
1016
}
1017

    
1018
void helper_mtc0_yqmask (target_ulong arg1)
1019
{
1020
    /* Yield qualifier inputs not implemented. */
1021
    env->CP0_YQMask = 0x00000000;
1022
}
1023

    
1024
void helper_mtc0_vpeopt (target_ulong arg1)
1025
{
1026
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
1027
}
1028

    
1029
void helper_mtc0_entrylo0 (target_ulong arg1)
1030
{
1031
    /* Large physaddr (PABITS) not implemented */
1032
    /* 1k pages not implemented */
1033
    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
1034
}
1035

    
1036
void helper_mtc0_tcstatus (target_ulong arg1)
1037
{
1038
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
1039
    uint32_t newval;
1040

    
1041
    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
1042

    
1043
    // TODO: Sync with CP0_Status.
1044

    
1045
    env->active_tc.CP0_TCStatus = newval;
1046
}
1047

    
1048
void helper_mttc0_tcstatus (target_ulong arg1)
1049
{
1050
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1051

    
1052
    // TODO: Sync with CP0_Status.
1053

    
1054
    if (other_tc == env->current_tc)
1055
        env->active_tc.CP0_TCStatus = arg1;
1056
    else
1057
        env->tcs[other_tc].CP0_TCStatus = arg1;
1058
}
1059

    
1060
void helper_mtc0_tcbind (target_ulong arg1)
1061
{
1062
    uint32_t mask = (1 << CP0TCBd_TBE);
1063
    uint32_t newval;
1064

    
1065
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1066
        mask |= (1 << CP0TCBd_CurVPE);
1067
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1068
    env->active_tc.CP0_TCBind = newval;
1069
}
1070

    
1071
void helper_mttc0_tcbind (target_ulong arg1)
1072
{
1073
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1074
    uint32_t mask = (1 << CP0TCBd_TBE);
1075
    uint32_t newval;
1076

    
1077
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1078
        mask |= (1 << CP0TCBd_CurVPE);
1079
    if (other_tc == env->current_tc) {
1080
        newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1081
        env->active_tc.CP0_TCBind = newval;
1082
    } else {
1083
        newval = (env->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
1084
        env->tcs[other_tc].CP0_TCBind = newval;
1085
    }
1086
}
1087

    
1088
void helper_mtc0_tcrestart (target_ulong arg1)
1089
{
1090
    env->active_tc.PC = arg1;
1091
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1092
    env->lladdr = 0ULL;
1093
    /* MIPS16 not implemented. */
1094
}
1095

    
1096
void helper_mttc0_tcrestart (target_ulong arg1)
1097
{
1098
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1099

    
1100
    if (other_tc == env->current_tc) {
1101
        env->active_tc.PC = arg1;
1102
        env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1103
        env->lladdr = 0ULL;
1104
        /* MIPS16 not implemented. */
1105
    } else {
1106
        env->tcs[other_tc].PC = arg1;
1107
        env->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1108
        env->lladdr = 0ULL;
1109
        /* MIPS16 not implemented. */
1110
    }
1111
}
1112

    
1113
void helper_mtc0_tchalt (target_ulong arg1)
1114
{
1115
    env->active_tc.CP0_TCHalt = arg1 & 0x1;
1116

    
1117
    // TODO: Halt TC / Restart (if allocated+active) TC.
1118
}
1119

    
1120
void helper_mttc0_tchalt (target_ulong arg1)
1121
{
1122
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1123

    
1124
    // TODO: Halt TC / Restart (if allocated+active) TC.
1125

    
1126
    if (other_tc == env->current_tc)
1127
        env->active_tc.CP0_TCHalt = arg1;
1128
    else
1129
        env->tcs[other_tc].CP0_TCHalt = arg1;
1130
}
1131

    
1132
void helper_mtc0_tccontext (target_ulong arg1)
1133
{
1134
    env->active_tc.CP0_TCContext = arg1;
1135
}
1136

    
1137
void helper_mttc0_tccontext (target_ulong arg1)
1138
{
1139
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1140

    
1141
    if (other_tc == env->current_tc)
1142
        env->active_tc.CP0_TCContext = arg1;
1143
    else
1144
        env->tcs[other_tc].CP0_TCContext = arg1;
1145
}
1146

    
1147
void helper_mtc0_tcschedule (target_ulong arg1)
1148
{
1149
    env->active_tc.CP0_TCSchedule = arg1;
1150
}
1151

    
1152
void helper_mttc0_tcschedule (target_ulong arg1)
1153
{
1154
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1155

    
1156
    if (other_tc == env->current_tc)
1157
        env->active_tc.CP0_TCSchedule = arg1;
1158
    else
1159
        env->tcs[other_tc].CP0_TCSchedule = arg1;
1160
}
1161

    
1162
void helper_mtc0_tcschefback (target_ulong arg1)
1163
{
1164
    env->active_tc.CP0_TCScheFBack = arg1;
1165
}
1166

    
1167
void helper_mttc0_tcschefback (target_ulong arg1)
1168
{
1169
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1170

    
1171
    if (other_tc == env->current_tc)
1172
        env->active_tc.CP0_TCScheFBack = arg1;
1173
    else
1174
        env->tcs[other_tc].CP0_TCScheFBack = arg1;
1175
}
1176

    
1177
void helper_mtc0_entrylo1 (target_ulong arg1)
1178
{
1179
    /* Large physaddr (PABITS) not implemented */
1180
    /* 1k pages not implemented */
1181
    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
1182
}
1183

    
1184
void helper_mtc0_context (target_ulong arg1)
1185
{
1186
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
1187
}
1188

    
1189
void helper_mtc0_pagemask (target_ulong arg1)
1190
{
1191
    /* 1k pages not implemented */
1192
    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
1193
}
1194

    
1195
void helper_mtc0_pagegrain (target_ulong arg1)
1196
{
1197
    /* SmartMIPS not implemented */
1198
    /* Large physaddr (PABITS) not implemented */
1199
    /* 1k pages not implemented */
1200
    env->CP0_PageGrain = 0;
1201
}
1202

    
1203
void helper_mtc0_wired (target_ulong arg1)
1204
{
1205
    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
1206
}
1207

    
1208
void helper_mtc0_srsconf0 (target_ulong arg1)
1209
{
1210
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
1211
}
1212

    
1213
void helper_mtc0_srsconf1 (target_ulong arg1)
1214
{
1215
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
1216
}
1217

    
1218
void helper_mtc0_srsconf2 (target_ulong arg1)
1219
{
1220
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
1221
}
1222

    
1223
void helper_mtc0_srsconf3 (target_ulong arg1)
1224
{
1225
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
1226
}
1227

    
1228
void helper_mtc0_srsconf4 (target_ulong arg1)
1229
{
1230
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
1231
}
1232

    
1233
void helper_mtc0_hwrena (target_ulong arg1)
1234
{
1235
    env->CP0_HWREna = arg1 & 0x0000000F;
1236
}
1237

    
1238
void helper_mtc0_count (target_ulong arg1)
1239
{
1240
    cpu_mips_store_count(env, arg1);
1241
}
1242

    
1243
void helper_mtc0_entryhi (target_ulong arg1)
1244
{
1245
    target_ulong old, val;
1246

    
1247
    /* 1k pages not implemented */
1248
    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
1249
#if defined(TARGET_MIPS64)
1250
    val &= env->SEGMask;
1251
#endif
1252
    old = env->CP0_EntryHi;
1253
    env->CP0_EntryHi = val;
1254
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1255
        uint32_t tcst = env->active_tc.CP0_TCStatus & ~0xff;
1256
        env->active_tc.CP0_TCStatus = tcst | (val & 0xff);
1257
    }
1258
    /* If the ASID changes, flush qemu's TLB.  */
1259
    if ((old & 0xFF) != (val & 0xFF))
1260
        cpu_mips_tlb_flush(env, 1);
1261
}
1262

    
1263
void helper_mttc0_entryhi(target_ulong arg1)
1264
{
1265
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1266
    int32_t tcstatus;
1267

    
1268
    env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (arg1 & ~0xff);
1269
    if (other_tc == env->current_tc) {
1270
        tcstatus = (env->active_tc.CP0_TCStatus & ~0xff) | (arg1 & 0xff);
1271
        env->active_tc.CP0_TCStatus = tcstatus;
1272
    } else {
1273
        tcstatus = (env->tcs[other_tc].CP0_TCStatus & ~0xff) | (arg1 & 0xff);
1274
        env->tcs[other_tc].CP0_TCStatus = tcstatus;
1275
    }
1276
}
1277

    
1278
void helper_mtc0_compare (target_ulong arg1)
1279
{
1280
    cpu_mips_store_compare(env, arg1);
1281
}
1282

    
1283
void helper_mtc0_status (target_ulong arg1)
1284
{
1285
    uint32_t val, old;
1286
    uint32_t mask = env->CP0_Status_rw_bitmask;
1287

    
1288
    val = arg1 & mask;
1289
    old = env->CP0_Status;
1290
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
1291
    compute_hflags(env);
1292
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1293
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
1294
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
1295
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
1296
                env->CP0_Cause);
1297
        switch (env->hflags & MIPS_HFLAG_KSU) {
1298
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1299
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1300
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
1301
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1302
        }
1303
    }
1304
}
1305

    
1306
void helper_mttc0_status(target_ulong arg1)
1307
{
1308
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1309
    int32_t tcstatus = env->tcs[other_tc].CP0_TCStatus;
1310

    
1311
    env->CP0_Status = arg1 & ~0xf1000018;
1312
    tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (arg1 & (0xf << CP0St_CU0));
1313
    tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((arg1 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
1314
    tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((arg1 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
1315
    if (other_tc == env->current_tc)
1316
        env->active_tc.CP0_TCStatus = tcstatus;
1317
    else
1318
        env->tcs[other_tc].CP0_TCStatus = tcstatus;
1319
}
1320

    
1321
void helper_mtc0_intctl (target_ulong arg1)
1322
{
1323
    /* vectored interrupts not implemented, no performance counters. */
1324
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (arg1 & 0x000002e0);
1325
}
1326

    
1327
void helper_mtc0_srsctl (target_ulong arg1)
1328
{
1329
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1330
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
1331
}
1332

    
1333
void helper_mtc0_cause (target_ulong arg1)
1334
{
1335
    uint32_t mask = 0x00C00300;
1336
    uint32_t old = env->CP0_Cause;
1337
    int i;
1338

    
1339
    if (env->insn_flags & ISA_MIPS32R2)
1340
        mask |= 1 << CP0Ca_DC;
1341

    
1342
    env->CP0_Cause = (env->CP0_Cause & ~mask) | (arg1 & mask);
1343

    
1344
    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
1345
        if (env->CP0_Cause & (1 << CP0Ca_DC))
1346
            cpu_mips_stop_count(env);
1347
        else
1348
            cpu_mips_start_count(env);
1349
    }
1350

    
1351
    /* Set/reset software interrupts */
1352
    for (i = 0 ; i < 2 ; i++) {
1353
        if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
1354
            cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
1355
        }
1356
    }
1357
}
1358

    
1359
void helper_mtc0_ebase (target_ulong arg1)
1360
{
1361
    /* vectored interrupts not implemented */
1362
    env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
1363
}
1364

    
1365
void helper_mtc0_config0 (target_ulong arg1)
1366
{
1367
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1368
}
1369

    
1370
void helper_mtc0_config2 (target_ulong arg1)
1371
{
1372
    /* tertiary/secondary caches not implemented */
1373
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1374
}
1375

    
1376
void helper_mtc0_lladdr (target_ulong arg1)
1377
{
1378
    target_long mask = env->CP0_LLAddr_rw_bitmask;
1379
    arg1 = arg1 << env->CP0_LLAddr_shift;
1380
    env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
1381
}
1382

    
1383
void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
1384
{
1385
    /* Watch exceptions for instructions, data loads, data stores
1386
       not implemented. */
1387
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1388
}
1389

    
1390
void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
1391
{
1392
    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
1393
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
1394
}
1395

    
1396
void helper_mtc0_xcontext (target_ulong arg1)
1397
{
1398
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1399
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1400
}
1401

    
1402
void helper_mtc0_framemask (target_ulong arg1)
1403
{
1404
    env->CP0_Framemask = arg1; /* XXX */
1405
}
1406

    
1407
void helper_mtc0_debug (target_ulong arg1)
1408
{
1409
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1410
    if (arg1 & (1 << CP0DB_DM))
1411
        env->hflags |= MIPS_HFLAG_DM;
1412
    else
1413
        env->hflags &= ~MIPS_HFLAG_DM;
1414
}
1415

    
1416
void helper_mttc0_debug(target_ulong arg1)
1417
{
1418
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1419
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1420

    
1421
    /* XXX: Might be wrong, check with EJTAG spec. */
1422
    if (other_tc == env->current_tc)
1423
        env->active_tc.CP0_Debug_tcstatus = val;
1424
    else
1425
        env->tcs[other_tc].CP0_Debug_tcstatus = val;
1426
    env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1427
                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1428
}
1429

    
1430
void helper_mtc0_performance0 (target_ulong arg1)
1431
{
1432
    env->CP0_Performance0 = arg1 & 0x000007ff;
1433
}
1434

    
1435
void helper_mtc0_taglo (target_ulong arg1)
1436
{
1437
    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1438
}
1439

    
1440
void helper_mtc0_datalo (target_ulong arg1)
1441
{
1442
    env->CP0_DataLo = arg1; /* XXX */
1443
}
1444

    
1445
void helper_mtc0_taghi (target_ulong arg1)
1446
{
1447
    env->CP0_TagHi = arg1; /* XXX */
1448
}
1449

    
1450
void helper_mtc0_datahi (target_ulong arg1)
1451
{
1452
    env->CP0_DataHi = arg1; /* XXX */
1453
}
1454

    
1455
/* MIPS MT functions */
1456
target_ulong helper_mftgpr(uint32_t sel)
1457
{
1458
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1459

    
1460
    if (other_tc == env->current_tc)
1461
        return env->active_tc.gpr[sel];
1462
    else
1463
        return env->tcs[other_tc].gpr[sel];
1464
}
1465

    
1466
target_ulong helper_mftlo(uint32_t sel)
1467
{
1468
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1469

    
1470
    if (other_tc == env->current_tc)
1471
        return env->active_tc.LO[sel];
1472
    else
1473
        return env->tcs[other_tc].LO[sel];
1474
}
1475

    
1476
target_ulong helper_mfthi(uint32_t sel)
1477
{
1478
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1479

    
1480
    if (other_tc == env->current_tc)
1481
        return env->active_tc.HI[sel];
1482
    else
1483
        return env->tcs[other_tc].HI[sel];
1484
}
1485

    
1486
target_ulong helper_mftacx(uint32_t sel)
1487
{
1488
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1489

    
1490
    if (other_tc == env->current_tc)
1491
        return env->active_tc.ACX[sel];
1492
    else
1493
        return env->tcs[other_tc].ACX[sel];
1494
}
1495

    
1496
target_ulong helper_mftdsp(void)
1497
{
1498
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1499

    
1500
    if (other_tc == env->current_tc)
1501
        return env->active_tc.DSPControl;
1502
    else
1503
        return env->tcs[other_tc].DSPControl;
1504
}
1505

    
1506
void helper_mttgpr(target_ulong arg1, uint32_t sel)
1507
{
1508
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1509

    
1510
    if (other_tc == env->current_tc)
1511
        env->active_tc.gpr[sel] = arg1;
1512
    else
1513
        env->tcs[other_tc].gpr[sel] = arg1;
1514
}
1515

    
1516
void helper_mttlo(target_ulong arg1, uint32_t sel)
1517
{
1518
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1519

    
1520
    if (other_tc == env->current_tc)
1521
        env->active_tc.LO[sel] = arg1;
1522
    else
1523
        env->tcs[other_tc].LO[sel] = arg1;
1524
}
1525

    
1526
void helper_mtthi(target_ulong arg1, uint32_t sel)
1527
{
1528
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1529

    
1530
    if (other_tc == env->current_tc)
1531
        env->active_tc.HI[sel] = arg1;
1532
    else
1533
        env->tcs[other_tc].HI[sel] = arg1;
1534
}
1535

    
1536
void helper_mttacx(target_ulong arg1, uint32_t sel)
1537
{
1538
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1539

    
1540
    if (other_tc == env->current_tc)
1541
        env->active_tc.ACX[sel] = arg1;
1542
    else
1543
        env->tcs[other_tc].ACX[sel] = arg1;
1544
}
1545

    
1546
void helper_mttdsp(target_ulong arg1)
1547
{
1548
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1549

    
1550
    if (other_tc == env->current_tc)
1551
        env->active_tc.DSPControl = arg1;
1552
    else
1553
        env->tcs[other_tc].DSPControl = arg1;
1554
}
1555

    
1556
/* MIPS MT functions */
1557
target_ulong helper_dmt(target_ulong arg1)
1558
{
1559
    // TODO
1560
    arg1 = 0;
1561
    // rt = arg1
1562

    
1563
    return arg1;
1564
}
1565

    
1566
target_ulong helper_emt(target_ulong arg1)
1567
{
1568
    // TODO
1569
    arg1 = 0;
1570
    // rt = arg1
1571

    
1572
    return arg1;
1573
}
1574

    
1575
target_ulong helper_dvpe(target_ulong arg1)
1576
{
1577
    // TODO
1578
    arg1 = 0;
1579
    // rt = arg1
1580

    
1581
    return arg1;
1582
}
1583

    
1584
target_ulong helper_evpe(target_ulong arg1)
1585
{
1586
    // TODO
1587
    arg1 = 0;
1588
    // rt = arg1
1589

    
1590
    return arg1;
1591
}
1592
#endif /* !CONFIG_USER_ONLY */
1593

    
1594
void helper_fork(target_ulong arg1, target_ulong arg2)
1595
{
1596
    // arg1 = rt, arg2 = rs
1597
    arg1 = 0;
1598
    // TODO: store to TC register
1599
}
1600

    
1601
target_ulong helper_yield(target_ulong arg)
1602
{
1603
    target_long arg1 = arg;
1604

    
1605
    if (arg1 < 0) {
1606
        /* No scheduling policy implemented. */
1607
        if (arg1 != -2) {
1608
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
1609
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
1610
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1611
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
1612
                helper_raise_exception(EXCP_THREAD);
1613
            }
1614
        }
1615
    } else if (arg1 == 0) {
1616
        if (0 /* TODO: TC underflow */) {
1617
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1618
            helper_raise_exception(EXCP_THREAD);
1619
        } else {
1620
            // TODO: Deallocate TC
1621
        }
1622
    } else if (arg1 > 0) {
1623
        /* Yield qualifier inputs not implemented. */
1624
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1625
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
1626
        helper_raise_exception(EXCP_THREAD);
1627
    }
1628
    return env->CP0_YQMask;
1629
}
1630

    
1631
#ifndef CONFIG_USER_ONLY
1632
/* TLB management */
1633
static void cpu_mips_tlb_flush (CPUState *env, int flush_global)
1634
{
1635
    /* Flush qemu's TLB and discard all shadowed entries.  */
1636
    tlb_flush (env, flush_global);
1637
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
1638
}
1639

    
1640
static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
1641
{
1642
    /* Discard entries from env->tlb[first] onwards.  */
1643
    while (env->tlb->tlb_in_use > first) {
1644
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
1645
    }
1646
}
1647

    
1648
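/* Copy the CP0 EntryHi/PageMask/EntryLo0/EntryLo1 registers into soft
   TLB entry 'idx'.  VPN and ASID come from EntryHi, the global (G) bit
   is the AND of both EntryLo registers, and each EntryLo supplies the
   V/D/C flags and PFN of one page of the even/odd pair. */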
static void r4k_fill_tlb (int idx)
1649
{
1650
    r4k_tlb_t *tlb;
1651

    
1652
    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1653
    tlb = &env->tlb->mmu.r4k.tlb[idx];
1654
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
1655
#if defined(TARGET_MIPS64)
1656
    tlb->VPN &= env->SEGMask;
1657
#endif
1658
    tlb->ASID = env->CP0_EntryHi & 0xFF;
1659
    tlb->PageMask = env->CP0_PageMask;
1660
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
1661
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1662
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1663
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1664
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1665
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1666
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1667
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1668
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1669
}
1670

    
1671
void r4k_helper_tlbwi (void)
1672
{
1673
    int idx;
1674

    
1675
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
1676

    
1677
    /* Discard cached TLB entries.  We could avoid doing this if the
1678
       tlbwi is just upgrading access permissions on the current entry;
1679
       that might be a further win.  */
1680
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
1681

    
1682
    r4k_invalidate_tlb(env, idx, 0);
1683
    r4k_fill_tlb(idx);
1684
}
1685

    
1686
void r4k_helper_tlbwr (void)
1687
{
1688
    int r = cpu_mips_get_random(env);
1689

    
1690
    r4k_invalidate_tlb(env, r, 1);
1691
    r4k_fill_tlb(r);
1692
}
1693

    
1694
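/* TLBP: probe the TLB for an entry matching EntryHi (VPN and, unless
   the entry is global, ASID).  On a hit CP0_Index is set to the entry
   number; on a miss any matching shadow entries are discarded and the
   probe-failure bit (bit 31) of CP0_Index is set. */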
void r4k_helper_tlbp (void)
1695
{
1696
    r4k_tlb_t *tlb;
1697
    target_ulong mask;
1698
    target_ulong tag;
1699
    target_ulong VPN;
1700
    uint8_t ASID;
1701
    int i;
1702

    
1703
    ASID = env->CP0_EntryHi & 0xFF;
1704
    for (i = 0; i < env->tlb->nb_tlb; i++) {
1705
        tlb = &env->tlb->mmu.r4k.tlb[i];
1706
        /* 1k pages are not supported. */
1707
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1708
        tag = env->CP0_EntryHi & ~mask;
1709
        VPN = tlb->VPN & ~mask;
1710
        /* Check ASID, virtual page number & size */
1711
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1712
            /* TLB match */
1713
            env->CP0_Index = i;
1714
            break;
1715
        }
1716
    }
1717
    if (i == env->tlb->nb_tlb) {
1718
        /* No match.  Discard any shadow entries, if any of them match.  */
1719
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
1720
            tlb = &env->tlb->mmu.r4k.tlb[i];
1721
            /* 1k pages are not supported. */
1722
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1723
            tag = env->CP0_EntryHi & ~mask;
1724
            VPN = tlb->VPN & ~mask;
1725
            /* Check ASID, virtual page number & size */
1726
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1727
                r4k_mips_tlb_flush_extra (env, i);
1728
                break;
1729
            }
1730
        }
1731

    
1732
        env->CP0_Index |= 0x80000000;
1733
    }
1734
}
1735

    
1736
void r4k_helper_tlbr (void)
1737
{
1738
    r4k_tlb_t *tlb;
1739
    uint8_t ASID;
1740
    int idx;
1741

    
1742
    ASID = env->CP0_EntryHi & 0xFF;
1743
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
1744
    tlb = &env->tlb->mmu.r4k.tlb[idx];
1745

    
1746
    /* If this will change the current ASID, flush qemu's TLB.  */
1747
    if (ASID != tlb->ASID)
1748
        cpu_mips_tlb_flush (env, 1);
1749

    
1750
    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
1751

    
1752
    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
1753
    env->CP0_PageMask = tlb->PageMask;
1754
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
1755
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
1756
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
1757
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
1758
}
1759

    
1760
void helper_tlbwi(void)
1761
{
1762
    env->tlb->helper_tlbwi();
1763
}
1764

    
1765
void helper_tlbwr(void)
1766
{
1767
    env->tlb->helper_tlbwr();
1768
}
1769

    
1770
void helper_tlbp(void)
1771
{
1772
    env->tlb->helper_tlbp();
1773
}
1774

    
1775
void helper_tlbr(void)
1776
{
1777
    env->tlb->helper_tlbr();
1778
}
1779

    
1780
/* Specials */
1781
target_ulong helper_di (void)
1782
{
1783
    target_ulong t0 = env->CP0_Status;
1784

    
1785
    env->CP0_Status = t0 & ~(1 << CP0St_IE);
1786
    return t0;
1787
}
1788

    
1789
target_ulong helper_ei (void)
1790
{
1791
    target_ulong t0 = env->CP0_Status;
1792

    
1793
    env->CP0_Status = t0 | (1 << CP0St_IE);
1794
    return t0;
1795
}
1796

    
1797
static void debug_pre_eret (void)
1798
{
1799
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1800
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1801
                env->active_tc.PC, env->CP0_EPC);
1802
        if (env->CP0_Status & (1 << CP0St_ERL))
1803
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1804
        if (env->hflags & MIPS_HFLAG_DM)
1805
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1806
        qemu_log("\n");
1807
    }
1808
}
1809

    
1810
static void debug_post_eret (void)
1811
{
1812
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1813
        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1814
                env->active_tc.PC, env->CP0_EPC);
1815
        if (env->CP0_Status & (1 << CP0St_ERL))
1816
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1817
        if (env->hflags & MIPS_HFLAG_DM)
1818
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1819
        switch (env->hflags & MIPS_HFLAG_KSU) {
1820
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1821
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1822
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
1823
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1824
        }
1825
    }
1826
}
1827

    
1828
static void set_pc (target_ulong error_pc)
1829
{
1830
    env->active_tc.PC = error_pc & ~(target_ulong)1;
1831
    if (error_pc & 1) {
1832
        env->hflags |= MIPS_HFLAG_M16;
1833
    } else {
1834
        env->hflags &= ~(MIPS_HFLAG_M16);
1835
    }
1836
}
1837

    
1838
void helper_eret (void)
1839
{
1840
    debug_pre_eret();
1841
    if (env->CP0_Status & (1 << CP0St_ERL)) {
1842
        set_pc(env->CP0_ErrorEPC);
1843
        env->CP0_Status &= ~(1 << CP0St_ERL);
1844
    } else {
1845
        set_pc(env->CP0_EPC);
1846
        env->CP0_Status &= ~(1 << CP0St_EXL);
1847
    }
1848
    compute_hflags(env);
1849
    debug_post_eret();
1850
    env->lladdr = 1;
1851
}
1852

    
1853
void helper_deret (void)
1854
{
1855
    debug_pre_eret();
1856
    set_pc(env->CP0_DEPC);
1857

    
1858
    env->hflags &= MIPS_HFLAG_DM;
1859
    compute_hflags(env);
1860
    debug_post_eret();
1861
    env->lladdr = 1;
1862
}
1863
#endif /* !CONFIG_USER_ONLY */
1864

    
1865
target_ulong helper_rdhwr_cpunum(void)
1866
{
1867
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1868
        (env->CP0_HWREna & (1 << 0)))
1869
        return env->CP0_EBase & 0x3ff;
1870
    else
1871
        helper_raise_exception(EXCP_RI);
1872

    
1873
    return 0;
1874
}
1875

    
1876
target_ulong helper_rdhwr_synci_step(void)
1877
{
1878
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1879
        (env->CP0_HWREna & (1 << 1)))
1880
        return env->SYNCI_Step;
1881
    else
1882
        helper_raise_exception(EXCP_RI);
1883

    
1884
    return 0;
1885
}
1886

    
1887
target_ulong helper_rdhwr_cc(void)
1888
{
1889
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1890
        (env->CP0_HWREna & (1 << 2)))
1891
        return env->CP0_Count;
1892
    else
1893
        helper_raise_exception(EXCP_RI);
1894

    
1895
    return 0;
1896
}
1897

    
1898
target_ulong helper_rdhwr_ccres(void)
1899
{
1900
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1901
        (env->CP0_HWREna & (1 << 3)))
1902
        return env->CCRes;
1903
    else
1904
        helper_raise_exception(EXCP_RI);
1905

    
1906
    return 0;
1907
}
1908

    
1909
void helper_pmon (int function)
1910
{
1911
    function /= 2;
1912
    switch (function) {
1913
    case 2: /* TODO: char inbyte(int waitflag); */
1914
        if (env->active_tc.gpr[4] == 0)
1915
            env->active_tc.gpr[2] = -1;
1916
        /* Fall through */
1917
    case 11: /* TODO: char inbyte (void); */
1918
        env->active_tc.gpr[2] = -1;
1919
        break;
1920
    case 3:
1921
    case 12:
1922
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
1923
        break;
1924
    case 17:
1925
        break;
1926
    case 158:
1927
        {
1928
            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
1929
            printf("%s", fmt);
1930
        }
1931
        break;
1932
    }
1933
}
1934

    
1935
void helper_wait (void)
1936
{
1937
    env->halted = 1;
1938
    helper_raise_exception(EXCP_HLT);
1939
}
1940

    
1941
#if !defined(CONFIG_USER_ONLY)
1942

    
1943
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
1944

    
1945
#define MMUSUFFIX _mmu
1946
#define ALIGNED_ONLY
1947

    
1948
#define SHIFT 0
1949
#include "softmmu_template.h"
1950

    
1951
#define SHIFT 1
1952
#include "softmmu_template.h"
1953

    
1954
#define SHIFT 2
1955
#include "softmmu_template.h"
1956

    
1957
#define SHIFT 3
1958
#include "softmmu_template.h"
1959

    
1960
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
1961
{
1962
    env->CP0_BadVAddr = addr;
1963
    do_restore_state (retaddr);
1964
    helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
1965
}
1966

    
1967
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
1968
{
1969
    TranslationBlock *tb;
1970
    CPUState *saved_env;
1971
    unsigned long pc;
1972
    int ret;
1973

    
1974
    /* XXX: hack to restore env in all cases, even if not called from
1975
       generated code */
1976
    saved_env = env;
1977
    env = cpu_single_env;
1978
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
1979
    if (ret) {
1980
        if (retaddr) {
1981
            /* now we have a real cpu fault */
1982
            pc = (unsigned long)retaddr;
1983
            tb = tb_find_pc(pc);
1984
            if (tb) {
1985
                /* the PC is inside the translated code. It means that we have
1986
                   a virtual CPU fault */
1987
                cpu_restore_state(tb, env, pc, NULL);
1988
            }
1989
        }
1990
        helper_raise_exception_err(env->exception_index, env->error_code);
1991
    }
1992
    env = saved_env;
1993
}
1994

    
1995
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
1996
                          int unused, int size)
1997
{
1998
    if (is_exec)
1999
        helper_raise_exception(EXCP_IBE);
2000
    else
2001
        helper_raise_exception(EXCP_DBE);
2002
}
2003
#endif /* !CONFIG_USER_ONLY */
2004

    
2005
/* Complex FPU operations which may need stack space. */
2006

    
2007
#define FLOAT_ONE32 make_float32(0x3f8 << 20)
2008
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2009
#define FLOAT_TWO32 make_float32(1 << 30)
2010
#define FLOAT_TWO64 make_float64(1ULL << 62)
2011
#define FLOAT_QNAN32 0x7fbfffff
2012
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2013
#define FLOAT_SNAN32 0x7fffffff
2014
#define FLOAT_SNAN64 0x7fffffffffffffffULL
2015

    
2016
/* convert MIPS rounding mode in FCR31 to IEEE library */
2017
static unsigned int ieee_rm[] = {
2018
    float_round_nearest_even,
2019
    float_round_to_zero,
2020
    float_round_up,
2021
    float_round_down
2022
};
2023

    
2024
#define RESTORE_ROUNDING_MODE \
2025
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2026

    
2027
#define RESTORE_FLUSH_MODE \
2028
    set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
2029

    
2030
target_ulong helper_cfc1 (uint32_t reg)
2031
{
2032
    target_ulong arg1;
2033

    
2034
    switch (reg) {
2035
    case 0:
2036
        arg1 = (int32_t)env->active_fpu.fcr0;
2037
        break;
2038
    case 25:
2039
        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
2040
        break;
2041
    case 26:
2042
        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
2043
        break;
2044
    case 28:
2045
        arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
2046
        break;
2047
    default:
2048
        arg1 = (int32_t)env->active_fpu.fcr31;
2049
        break;
2050
    }
2051

    
2052
    return arg1;
2053
}
2054

    
2055
void helper_ctc1 (target_ulong arg1, uint32_t reg)
2056
{
2057
    switch(reg) {
2058
    case 25:
2059
        if (arg1 & 0xffffff00)
2060
            return;
2061
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
2062
                     ((arg1 & 0x1) << 23);
2063
        break;
2064
    case 26:
2065
        if (arg1 & 0x007c0000)
2066
            return;
2067
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
2068
        break;
2069
    case 28:
2070
        if (arg1 & 0x007c0000)
2071
            return;
2072
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
2073
                     ((arg1 & 0x4) << 22);
2074
        break;
2075
    case 31:
2076
        if (arg1 & 0x007c0000)
2077
            return;
2078
        env->active_fpu.fcr31 = arg1;
2079
        break;
2080
    default:
2081
        return;
2082
    }
2083
    /* set rounding mode */
2084
    RESTORE_ROUNDING_MODE;
2085
    /* set flush-to-zero mode */
2086
    RESTORE_FLUSH_MODE;
2087
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2088
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
2089
        helper_raise_exception(EXCP_FPE);
2090
}
2091

    
2092
static inline char ieee_ex_to_mips(char xcpt)
2093
{
2094
    return (xcpt & float_flag_inexact) >> 5 |
2095
           (xcpt & float_flag_underflow) >> 3 |
2096
           (xcpt & float_flag_overflow) >> 1 |
2097
           (xcpt & float_flag_divbyzero) << 1 |
2098
           (xcpt & float_flag_invalid) << 4;
2099
}
2100

    
2101
static inline char mips_ex_to_ieee(char xcpt)
2102
{
2103
    return (xcpt & FP_INEXACT) << 5 |
2104
           (xcpt & FP_UNDERFLOW) << 3 |
2105
           (xcpt & FP_OVERFLOW) << 1 |
2106
           (xcpt & FP_DIV0) >> 1 |
2107
           (xcpt & FP_INVALID) >> 4;
2108
}
2109

    
2110
static inline void update_fcr31(void)
2111
{
2112
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));
2113

    
2114
    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
2115
    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
2116
        helper_raise_exception(EXCP_FPE);
2117
    else
2118
        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
2119
}
2120

    
2121
/* Float support.
2122
   Single precition routines have a "s" suffix, double precision a
2123
   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2124
   paired single lower "pl", paired single upper "pu".  */
2125

    
2126
/* unary operations, modifying fp status  */
2127
uint64_t helper_float_sqrt_d(uint64_t fdt0)
2128
{
2129
    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
2130
}
2131

    
2132
uint32_t helper_float_sqrt_s(uint32_t fst0)
2133
{
2134
    return float32_sqrt(fst0, &env->active_fpu.fp_status);
2135
}
2136

    
2137
uint64_t helper_float_cvtd_s(uint32_t fst0)
2138
{
2139
    uint64_t fdt2;
2140

    
2141
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2142
    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
2143
    update_fcr31();
2144
    return fdt2;
2145
}
2146

    
2147
uint64_t helper_float_cvtd_w(uint32_t wt0)
2148
{
2149
    uint64_t fdt2;
2150

    
2151
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2152
    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
2153
    update_fcr31();
2154
    return fdt2;
2155
}
2156

    
2157
uint64_t helper_float_cvtd_l(uint64_t dt0)
2158
{
2159
    uint64_t fdt2;
2160

    
2161
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2162
    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
2163
    update_fcr31();
2164
    return fdt2;
2165
}
2166

    
2167
uint64_t helper_float_cvtl_d(uint64_t fdt0)
2168
{
2169
    uint64_t dt2;
2170

    
2171
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2172
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2173
    update_fcr31();
2174
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2175
        dt2 = FLOAT_SNAN64;
2176
    return dt2;
2177
}
2178

    
2179
uint64_t helper_float_cvtl_s(uint32_t fst0)
2180
{
2181
    uint64_t dt2;
2182

    
2183
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2184
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2185
    update_fcr31();
2186
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2187
        dt2 = FLOAT_SNAN64;
2188
    return dt2;
2189
}
2190

    
2191
uint64_t helper_float_cvtps_pw(uint64_t dt0)
2192
{
2193
    uint32_t fst2;
2194
    uint32_t fsth2;
2195

    
2196
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2197
    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2198
    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
2199
    update_fcr31();
2200
    return ((uint64_t)fsth2 << 32) | fst2;
2201
}
2202

    
2203
uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
2204
{
2205
    uint32_t wt2;
2206
    uint32_t wth2;
2207

    
2208
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2209
    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2210
    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
2211
    update_fcr31();
2212
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
2213
        wt2 = FLOAT_SNAN32;
2214
        wth2 = FLOAT_SNAN32;
2215
    }
2216
    return ((uint64_t)wth2 << 32) | wt2;
2217
}
2218

    
2219
uint32_t helper_float_cvts_d(uint64_t fdt0)
2220
{
2221
    uint32_t fst2;
2222

    
2223
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2224
    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
2225
    update_fcr31();
2226
    return fst2;
2227
}
2228

    
2229
uint32_t helper_float_cvts_w(uint32_t wt0)
2230
{
2231
    uint32_t fst2;
2232

    
2233
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2234
    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
2235
    update_fcr31();
2236
    return fst2;
2237
}
2238

    
2239
uint32_t helper_float_cvts_l(uint64_t dt0)
2240
{
2241
    uint32_t fst2;
2242

    
2243
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2244
    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
2245
    update_fcr31();
2246
    return fst2;
2247
}
2248

    
2249
uint32_t helper_float_cvts_pl(uint32_t wt0)
2250
{
2251
    uint32_t wt2;
2252

    
2253
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2254
    wt2 = wt0;
2255
    update_fcr31();
2256
    return wt2;
2257
}
2258

    
2259
uint32_t helper_float_cvts_pu(uint32_t wth0)
2260
{
2261
    uint32_t wt2;
2262

    
2263
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2264
    wt2 = wth0;
2265
    update_fcr31();
2266
    return wt2;
2267
}
2268

    
2269
uint32_t helper_float_cvtw_s(uint32_t fst0)
2270
{
2271
    uint32_t wt2;
2272

    
2273
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2274
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2275
    update_fcr31();
2276
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2277
        wt2 = FLOAT_SNAN32;
2278
    return wt2;
2279
}
2280

    
2281
uint32_t helper_float_cvtw_d(uint64_t fdt0)
2282
{
2283
    uint32_t wt2;
2284

    
2285
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2286
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2287
    update_fcr31();
2288
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2289
        wt2 = FLOAT_SNAN32;
2290
    return wt2;
2291
}
2292

    
2293
uint64_t helper_float_roundl_d(uint64_t fdt0)
2294
{
2295
    uint64_t dt2;
2296

    
2297
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2298
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2299
    RESTORE_ROUNDING_MODE;
2300
    update_fcr31();
2301
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2302
        dt2 = FLOAT_SNAN64;
2303
    return dt2;
2304
}
2305

    
2306
uint64_t helper_float_roundl_s(uint32_t fst0)
2307
{
2308
    uint64_t dt2;
2309

    
2310
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2311
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2312
    RESTORE_ROUNDING_MODE;
2313
    update_fcr31();
2314
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2315
        dt2 = FLOAT_SNAN64;
2316
    return dt2;
2317
}
2318

    
2319
uint32_t helper_float_roundw_d(uint64_t fdt0)
2320
{
2321
    uint32_t wt2;
2322

    
2323
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2324
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2325
    RESTORE_ROUNDING_MODE;
2326
    update_fcr31();
2327
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2328
        wt2 = FLOAT_SNAN32;
2329
    return wt2;
2330
}
2331

    
2332
uint32_t helper_float_roundw_s(uint32_t fst0)
2333
{
2334
    uint32_t wt2;
2335

    
2336
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2337
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2338
    RESTORE_ROUNDING_MODE;
2339
    update_fcr31();
2340
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2341
        wt2 = FLOAT_SNAN32;
2342
    return wt2;
2343
}
2344

    
2345
uint64_t helper_float_truncl_d(uint64_t fdt0)
2346
{
2347
    uint64_t dt2;
2348

    
2349
    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
2350
    update_fcr31();
2351
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2352
        dt2 = FLOAT_SNAN64;
2353
    return dt2;
2354
}
2355

    
2356
uint64_t helper_float_truncl_s(uint32_t fst0)
2357
{
2358
    uint64_t dt2;
2359

    
2360
    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
2361
    update_fcr31();
2362
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2363
        dt2 = FLOAT_SNAN64;
2364
    return dt2;
2365
}
2366

    
2367
uint32_t helper_float_truncw_d(uint64_t fdt0)
2368
{
2369
    uint32_t wt2;
2370

    
2371
    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
2372
    update_fcr31();
2373
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2374
        wt2 = FLOAT_SNAN32;
2375
    return wt2;
2376
}
2377

    
2378
uint32_t helper_float_truncw_s(uint32_t fst0)
2379
{
2380
    uint32_t wt2;
2381

    
2382
    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
2383
    update_fcr31();
2384
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2385
        wt2 = FLOAT_SNAN32;
2386
    return wt2;
2387
}
2388

    
2389
uint64_t helper_float_ceill_d(uint64_t fdt0)
2390
{
2391
    uint64_t dt2;
2392

    
2393
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2394
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2395
    RESTORE_ROUNDING_MODE;
2396
    update_fcr31();
2397
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2398
        dt2 = FLOAT_SNAN64;
2399
    return dt2;
2400
}
2401

    
2402
uint64_t helper_float_ceill_s(uint32_t fst0)
2403
{
2404
    uint64_t dt2;
2405

    
2406
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2407
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2408
    RESTORE_ROUNDING_MODE;
2409
    update_fcr31();
2410
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2411
        dt2 = FLOAT_SNAN64;
2412
    return dt2;
2413
}
2414

    
2415
uint32_t helper_float_ceilw_d(uint64_t fdt0)
2416
{
2417
    uint32_t wt2;
2418

    
2419
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2420
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2421
    RESTORE_ROUNDING_MODE;
2422
    update_fcr31();
2423
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2424
        wt2 = FLOAT_SNAN32;
2425
    return wt2;
2426
}
2427

    
2428
uint32_t helper_float_ceilw_s(uint32_t fst0)
2429
{
2430
    uint32_t wt2;
2431

    
2432
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2433
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2434
    RESTORE_ROUNDING_MODE;
2435
    update_fcr31();
2436
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2437
        wt2 = FLOAT_SNAN32;
2438
    return wt2;
2439
}
2440

    
2441
uint64_t helper_float_floorl_d(uint64_t fdt0)
2442
{
2443
    uint64_t dt2;
2444

    
2445
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2446
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2447
    RESTORE_ROUNDING_MODE;
2448
    update_fcr31();
2449
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2450
        dt2 = FLOAT_SNAN64;
2451
    return dt2;
2452
}
2453

    
2454
uint64_t helper_float_floorl_s(uint32_t fst0)
2455
{
2456
    uint64_t dt2;
2457

    
2458
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2459
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2460
    RESTORE_ROUNDING_MODE;
2461
    update_fcr31();
2462
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2463
        dt2 = FLOAT_SNAN64;
2464
    return dt2;
2465
}
2466

    
2467
uint32_t helper_float_floorw_d(uint64_t fdt0)
2468
{
2469
    uint32_t wt2;
2470

    
2471
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2472
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2473
    RESTORE_ROUNDING_MODE;
2474
    update_fcr31();
2475
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2476
        wt2 = FLOAT_SNAN32;
2477
    return wt2;
2478
}
2479

    
2480
uint32_t helper_float_floorw_s(uint32_t fst0)
2481
{
2482
    uint32_t wt2;
2483

    
2484
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2485
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2486
    RESTORE_ROUNDING_MODE;
2487
    update_fcr31();
2488
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2489
        wt2 = FLOAT_SNAN32;
2490
    return wt2;
2491
}
2492

    
2493
/* unary operations, not modifying fp status  */
2494
#define FLOAT_UNOP(name)                                       \
2495
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)                \
2496
{                                                              \
2497
    return float64_ ## name(fdt0);                             \
2498
}                                                              \
2499
uint32_t helper_float_ ## name ## _s(uint32_t fst0)                \
2500
{                                                              \
2501
    return float32_ ## name(fst0);                             \
2502
}                                                              \
2503
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)               \
2504
{                                                              \
2505
    uint32_t wt0;                                              \
2506
    uint32_t wth0;                                             \
2507
                                                               \
2508
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                 \
2509
    wth0 = float32_ ## name(fdt0 >> 32);                       \
2510
    return ((uint64_t)wth0 << 32) | wt0;                       \
2511
}
2512
FLOAT_UNOP(abs)
2513
FLOAT_UNOP(chs)
2514
#undef FLOAT_UNOP
2515

    
2516
/* MIPS specific unary operations */
2517
uint64_t helper_float_recip_d(uint64_t fdt0)
2518
{
2519
    uint64_t fdt2;
2520

    
2521
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2522
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2523
    update_fcr31();
2524
    return fdt2;
2525
}
2526

    
2527
uint32_t helper_float_recip_s(uint32_t fst0)
2528
{
2529
    uint32_t fst2;
2530

    
2531
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2532
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2533
    update_fcr31();
2534
    return fst2;
2535
}
2536

    
2537
uint64_t helper_float_rsqrt_d(uint64_t fdt0)
2538
{
2539
    uint64_t fdt2;
2540

    
2541
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2542
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2543
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2544
    update_fcr31();
2545
    return fdt2;
2546
}
2547

    
2548
uint32_t helper_float_rsqrt_s(uint32_t fst0)
2549
{
2550
    uint32_t fst2;
2551

    
2552
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2553
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2554
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2555
    update_fcr31();
2556
    return fst2;
2557
}
2558

    
2559
uint64_t helper_float_recip1_d(uint64_t fdt0)
2560
{
2561
    uint64_t fdt2;
2562

    
2563
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2564
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2565
    update_fcr31();
2566
    return fdt2;
2567
}
2568

    
2569
uint32_t helper_float_recip1_s(uint32_t fst0)
2570
{
2571
    uint32_t fst2;
2572

    
2573
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2574
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2575
    update_fcr31();
2576
    return fst2;
2577
}
2578

    
2579
uint64_t helper_float_recip1_ps(uint64_t fdt0)
2580
{
2581
    uint32_t fst2;
2582
    uint32_t fsth2;
2583

    
2584
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2585
    fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2586
    fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
2587
    update_fcr31();
2588
    return ((uint64_t)fsth2 << 32) | fst2;
2589
}
2590

    
2591
uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
2592
{
2593
    uint64_t fdt2;
2594

    
2595
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2596
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2597
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2598
    update_fcr31();
2599
    return fdt2;
2600
}
2601

    
2602
uint32_t helper_float_rsqrt1_s(uint32_t fst0)
2603
{
2604
    uint32_t fst2;
2605

    
2606
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2607
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2608
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2609
    update_fcr31();
2610
    return fst2;
2611
}
2612

    
2613
uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
2614
{
2615
    uint32_t fst2;
2616
    uint32_t fsth2;
2617

    
2618
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2619
    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2620
    fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
2621
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2622
    fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
2623
    update_fcr31();
2624
    return ((uint64_t)fsth2 << 32) | fst2;
2625
}
2626

    
2627
#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
2628

    
2629
/* binary operations */
2630
#define FLOAT_BINOP(name)                                          \
2631
uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)     \
2632
{                                                                  \
2633
    uint64_t dt2;                                                  \
2634
                                                                   \
2635
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
2636
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);     \
2637
    update_fcr31();                                                \
2638
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
2639
        dt2 = FLOAT_QNAN64;                                        \
2640
    return dt2;                                                    \
2641
}                                                                  \
2642
                                                                   \
2643
uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)     \
2644
{                                                                  \
2645
    uint32_t wt2;                                                  \
2646
                                                                   \
2647
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
2648
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
2649
    update_fcr31();                                                \
2650
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
2651
        wt2 = FLOAT_QNAN32;                                        \
2652
    return wt2;                                                    \
2653
}                                                                  \
2654
                                                                   \
2655
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)    \
2656
{                                                                  \
2657
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
2658
    uint32_t fsth0 = fdt0 >> 32;                                   \
2659
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
2660
    uint32_t fsth1 = fdt1 >> 32;                                   \
2661
    uint32_t wt2;                                                  \
2662
    uint32_t wth2;                                                 \
2663
                                                                   \
2664
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
2665
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
2666
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);  \
2667
    update_fcr31();                                                \
2668
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {              \
2669
        wt2 = FLOAT_QNAN32;                                        \
2670
        wth2 = FLOAT_QNAN32;                                       \
2671
    }                                                              \
2672
    return ((uint64_t)wth2 << 32) | wt2;                           \
2673
}
2674

    
2675
FLOAT_BINOP(add)
2676
FLOAT_BINOP(sub)
2677
FLOAT_BINOP(mul)
2678
FLOAT_BINOP(div)
2679
#undef FLOAT_BINOP
2680

    
2681
/* ternary operations */
2682
#define FLOAT_TERNOP(name1, name2)                                        \
2683
uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
2684
                                           uint64_t fdt2)                 \
2685
{                                                                         \
2686
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
2687
    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
2688
}                                                                         \
2689
                                                                          \
2690
uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
2691
                                           uint32_t fst2)                 \
2692
{                                                                         \
2693
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2694
    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2695
}                                                                         \
2696
                                                                          \
2697
uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
2698
                                            uint64_t fdt2)                \
2699
{                                                                         \
2700
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
2701
    uint32_t fsth0 = fdt0 >> 32;                                          \
2702
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
2703
    uint32_t fsth1 = fdt1 >> 32;                                          \
2704
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
2705
    uint32_t fsth2 = fdt2 >> 32;                                          \
2706
                                                                          \
2707
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2708
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
2709
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2710
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
2711
    return ((uint64_t)fsth2 << 32) | fst2;                                \
2712
}
2713

    
2714
FLOAT_TERNOP(mul, add)
2715
FLOAT_TERNOP(mul, sub)
2716
#undef FLOAT_TERNOP
2717

    
2718
/* negated ternary operations */
2719
#define FLOAT_NTERNOP(name1, name2)                                       \
2720
uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
2721
                                           uint64_t fdt2)                 \
2722
{                                                                         \
2723
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
2724
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
2725
    return float64_chs(fdt2);                                             \
2726
}                                                                         \
2727
                                                                          \
2728
uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
2729
                                           uint32_t fst2)                 \
2730
{                                                                         \
2731
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2732
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2733
    return float32_chs(fst2);                                             \
2734
}                                                                         \
2735
                                                                          \
2736
uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
2737
                                           uint64_t fdt2)                 \
2738
{                                                                         \
2739
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
2740
    uint32_t fsth0 = fdt0 >> 32;                                          \
2741
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
2742
    uint32_t fsth1 = fdt1 >> 32;                                          \
2743
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
2744
    uint32_t fsth2 = fdt2 >> 32;                                          \
2745
                                                                          \
2746
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2747
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
2748
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2749
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
2750
    fst2 = float32_chs(fst2);                                             \
2751
    fsth2 = float32_chs(fsth2);                                           \
2752
    return ((uint64_t)fsth2 << 32) | fst2;                                \
2753
}
2754

    
2755
FLOAT_NTERNOP(mul, add)
2756
FLOAT_NTERNOP(mul, sub)
2757
#undef FLOAT_NTERNOP
2758

    
2759
/* MIPS specific binary operations */
2760
uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
2761
{
2762
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2763
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2764
    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
2765
    update_fcr31();
2766
    return fdt2;
2767
}
2768

    
2769
uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
2770
{
2771
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2772
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2773
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2774
    update_fcr31();
2775
    return fst2;
2776
}
2777

    
2778
uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
2779
{
2780
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2781
    uint32_t fsth0 = fdt0 >> 32;
2782
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
2783
    uint32_t fsth2 = fdt2 >> 32;
2784

    
2785
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2786
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2787
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
2788
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2789
    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
2790
    update_fcr31();
2791
    return ((uint64_t)fsth2 << 32) | fst2;
2792
}
2793

    
2794
uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
2795
{
2796
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2797
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2798
    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
2799
    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
2800
    update_fcr31();
2801
    return fdt2;
2802
}
2803

    
2804
uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
2805
{
2806
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2807
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2808
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
2809
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
2810
    update_fcr31();
2811
    return fst2;
2812
}
2813

    
2814
uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
2815
{
2816
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2817
    uint32_t fsth0 = fdt0 >> 32;
2818
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
2819
    uint32_t fsth2 = fdt2 >> 32;
2820

    
2821
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2822
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2823
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
2824
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
2825
    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
2826
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
2827
    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
2828
    update_fcr31();
2829
    return ((uint64_t)fsth2 << 32) | fst2;
2830
}
2831

    
2832
uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
2833
{
2834
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2835
    uint32_t fsth0 = fdt0 >> 32;
2836
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
2837
    uint32_t fsth1 = fdt1 >> 32;
2838
    uint32_t fst2;
2839
    uint32_t fsth2;
2840

    
2841
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2842
    fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
2843
    fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
2844
    update_fcr31();
2845
    return ((uint64_t)fsth2 << 32) | fst2;
2846
}
2847

    
2848
uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
2849
{
2850
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2851
    uint32_t fsth0 = fdt0 >> 32;
2852
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
2853
    uint32_t fsth1 = fdt1 >> 32;
2854
    uint32_t fst2;
2855
    uint32_t fsth2;
2856

    
2857
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2858
    fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
2859
    fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
2860
    update_fcr31();
2861
    return ((uint64_t)fsth2 << 32) | fst2;
2862
}
2863

    
2864
/* compare operations */
2865
#define FOP_COND_D(op, cond)                                   \
2866
void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
2867
{                                                              \
2868
    int c = cond;                                              \
2869
    update_fcr31();                                            \
2870
    if (c)                                                     \
2871
        SET_FP_COND(cc, env->active_fpu);                      \
2872
    else                                                       \
2873
        CLEAR_FP_COND(cc, env->active_fpu);                    \
2874
}                                                              \
2875
void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
2876
{                                                              \
2877
    int c;                                                     \
2878
    fdt0 = float64_abs(fdt0);                                  \
2879
    fdt1 = float64_abs(fdt1);                                  \
2880
    c = cond;                                                  \
2881
    update_fcr31();                                            \
2882
    if (c)                                                     \
2883
        SET_FP_COND(cc, env->active_fpu);                      \
2884
    else                                                       \
2885
        CLEAR_FP_COND(cc, env->active_fpu);                    \
2886
}
2887

    
2888
static int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
2889
{
2890
    if (float64_is_signaling_nan(a) ||
2891
        float64_is_signaling_nan(b) ||
2892
        (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
2893
        float_raise(float_flag_invalid, status);
2894
        return 1;
2895
    } else if (float64_is_nan(a) || float64_is_nan(b)) {
2896
        return 1;
2897
    } else {
2898
        return 0;
2899
    }
2900
}
2901

    
2902
/* NOTE: the comma operator will make "cond" to eval to false,
2903
 * but float*_is_unordered() is still called. */
2904
FOP_COND_D(f,   (float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status), 0))
2905
FOP_COND_D(un,  float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status))
2906
FOP_COND_D(eq,  !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
2907
FOP_COND_D(ueq, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
2908
FOP_COND_D(olt, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
2909
FOP_COND_D(ult, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
2910
FOP_COND_D(ole, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2911
FOP_COND_D(ule, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2912
/* NOTE: the comma operator will make "cond" to eval to false,
2913
 * but float*_is_unordered() is still called. */
2914
FOP_COND_D(sf,  (float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status), 0))
2915
FOP_COND_D(ngle,float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status))
2916
FOP_COND_D(seq, !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
2917
FOP_COND_D(ngl, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
2918
FOP_COND_D(lt,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
2919
FOP_COND_D(nge, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
2920
FOP_COND_D(le,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2921
FOP_COND_D(ngt, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2922

    
2923
#define FOP_COND_S(op, cond)                                   \
2924
void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
2925
{                                                              \
2926
    int c = cond;                                              \
2927
    update_fcr31();                                            \
2928
    if (c)                                                     \
2929
        SET_FP_COND(cc, env->active_fpu);                      \
2930
    else                                                       \
2931
        CLEAR_FP_COND(cc, env->active_fpu);                    \
2932
}                                                              \
2933
void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
2934
{                                                              \
2935
    int c;                                                     \
2936
    fst0 = float32_abs(fst0);                                  \
2937
    fst1 = float32_abs(fst1);                                  \
2938
    c = cond;                                                  \
2939
    update_fcr31();                                            \
2940
    if (c)                                                     \
2941
        SET_FP_COND(cc, env->active_fpu);                      \
2942
    else                                                       \
2943
        CLEAR_FP_COND(cc, env->active_fpu);                    \
2944
}
2945

    
2946
static flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
2947
{
2948
    if (float32_is_signaling_nan(a) ||
2949
        float32_is_signaling_nan(b) ||
2950
        (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
2951
        float_raise(float_flag_invalid, status);
2952
        return 1;
2953
    } else if (float32_is_nan(a) || float32_is_nan(b)) {
2954
        return 1;
2955
    } else {
2956
        return 0;
2957
    }
2958
}
2959

    
2960
/* NOTE: the comma operator will make "cond" to eval to false,
2961
 * but float*_is_unordered() is still called. */
2962
FOP_COND_S(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0))
2963
FOP_COND_S(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status))
2964
FOP_COND_S(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
2965
FOP_COND_S(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
2966
FOP_COND_S(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
2967
FOP_COND_S(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
2968
FOP_COND_S(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
2969
FOP_COND_S(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
2970
/* NOTE: the comma operator will make "cond" to eval to false,
2971
 * but float*_is_unordered() is still called. */
2972
FOP_COND_S(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0))
2973
FOP_COND_S(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status))
2974
FOP_COND_S(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
2975
FOP_COND_S(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
2976
FOP_COND_S(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
2977
FOP_COND_S(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
2978
FOP_COND_S(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
2979
FOP_COND_S(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
2980

    
2981
#define FOP_COND_PS(op, condl, condh)                           \
2982
void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
2983
{                                                               \
2984
    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
2985
    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
2986
    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
2987
    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
2988
    int cl = condl;                                             \
2989
    int ch = condh;                                             \
2990
                                                                \
2991
    update_fcr31();                                             \
2992
    if (cl)                                                     \
2993
        SET_FP_COND(cc, env->active_fpu);                       \
2994
    else                                                        \
2995
        CLEAR_FP_COND(cc, env->active_fpu);                     \
2996
    if (ch)                                                     \
2997
        SET_FP_COND(cc + 1, env->active_fpu);                   \
2998
    else                                                        \
2999
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
3000
}                                                               \
3001
void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3002
{                                                               \
3003
    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
3004
    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
3005
    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
3006
    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
3007
    int cl = condl;                                             \
3008
    int ch = condh;                                             \
3009
                                                                \
3010
    update_fcr31();                                             \
3011
    if (cl)                                                     \
3012
        SET_FP_COND(cc, env->active_fpu);                       \
3013
    else                                                        \
3014
        CLEAR_FP_COND(cc, env->active_fpu);                     \
3015
    if (ch)                                                     \
3016
        SET_FP_COND(cc + 1, env->active_fpu);                   \
3017
    else                                                        \
3018
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
3019
}
3020

    
3021
/* NOTE: the comma operator will make "cond" to eval to false,
3022
 * but float*_is_unordered() is still called. */
3023
FOP_COND_PS(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0),
3024
                 (float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status), 0))
3025
FOP_COND_PS(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status),
3026
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status))
3027
FOP_COND_PS(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3028
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3029
FOP_COND_PS(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3030
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3031
FOP_COND_PS(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3032
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3033
FOP_COND_PS(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3034
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3035
FOP_COND_PS(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
3036
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3037
FOP_COND_PS(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3038
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3039
/* NOTE: the comma operator will make "cond" to eval to false,
3040
 * but float*_is_unordered() is still called. */
3041
FOP_COND_PS(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0),
3042
                 (float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status), 0))
3043
FOP_COND_PS(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status),
3044
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status))
3045
FOP_COND_PS(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3046
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3047
FOP_COND_PS(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3048
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3049
FOP_COND_PS(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3050
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3051
FOP_COND_PS(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3052
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3053
FOP_COND_PS(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
3054
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3055
FOP_COND_PS(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3056
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))