/*
 *  MIPS emulation helpers for qemu.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include "exec.h"

#include "host-utils.h"

#include "helper.h"

#ifndef CONFIG_USER_ONLY
static inline void cpu_mips_tlb_flush (CPUState *env, int flush_global);
#endif

/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, int error_code)
{
#if 1
    if (exception < 0x100)
        qemu_log("%s: %d %d\n", __func__, exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}

void helper_interrupt_restart (void)
{
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM) &&
        (env->CP0_Status & (1 << CP0St_IE)) &&
        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
        env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
        helper_raise_exception(EXCP_EXT_INTERRUPT);
    }
}

#if !defined(CONFIG_USER_ONLY)
static void do_restore_state (void *pc_ptr)
{
    TranslationBlock *tb;
    unsigned long pc = (unsigned long) pc_ptr;

    tb = tb_find_pc (pc);
    if (tb) {
        cpu_restore_state (tb, env, pc, NULL);
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(target_ulong addr, int mem_idx)            \
{                                                                       \
    return (type) insn##_raw(addr);                                     \
}
#else
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(target_ulong addr, int mem_idx)            \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: return (type) insn##_kernel(addr); break;                   \
    case 1: return (type) insn##_super(addr); break;                    \
    default:                                                            \
    case 2: return (type) insn##_user(addr); break;                     \
    }                                                                   \
}
#endif
HELPER_LD(lbu, ldub, uint8_t)
HELPER_LD(lw, ldl, int32_t)
#ifdef TARGET_MIPS64
HELPER_LD(ld, ldq, int64_t)
#endif
#undef HELPER_LD

#if defined(CONFIG_USER_ONLY)
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
{                                                                       \
    insn##_raw(addr, val);                                              \
}
#else
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: insn##_kernel(addr, val); break;                            \
    case 1: insn##_super(addr, val); break;                             \
    default:                                                            \
    case 2: insn##_user(addr, val); break;                              \
    }                                                                   \
}
#endif
HELPER_ST(sb, stb, uint8_t)
HELPER_ST(sw, stl, uint32_t)
#ifdef TARGET_MIPS64
HELPER_ST(sd, stq, uint64_t)
#endif
#undef HELPER_ST
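
/*
 * Note on mem_idx: the generated do_lbu/do_lw/do_sb/... accessors pick the
 * softmmu access variant by MMU mode -- 0 selects the *_kernel accessors,
 * 1 the *_super (supervisor) ones and 2 the *_user ones.  In the
 * CONFIG_USER_ONLY build there is only one address space, so the "raw"
 * accessors are used and mem_idx is ignored.
 */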

target_ulong helper_clo (target_ulong arg1)
{
    return clo32(arg1);
}

target_ulong helper_clz (target_ulong arg1)
{
    return clz32(arg1);
}

#if defined(TARGET_MIPS64)
target_ulong helper_dclo (target_ulong arg1)
{
    return clo64(arg1);
}

target_ulong helper_dclz (target_ulong arg1)
{
    return clz64(arg1);
}
#endif /* TARGET_MIPS64 */

/* 64 bits arithmetic for 32 bits hosts */
static inline uint64_t get_HILO (void)
{
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
}

static inline void set_HILO (uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)HILO;
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}

static inline void set_HIT0_LO (target_ulong arg1, uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    arg1 = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}

static inline void set_HI_LOT0 (target_ulong arg1, uint64_t HILO)
{
    arg1 = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}
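
/*
 * Note: set_HIT0_LO() and set_HI_LOT0() assign to their arg1 parameter,
 * but arg1 is passed by value, so the assignment is not visible in the
 * callers; the vr54xx helpers below therefore return their first operand
 * unchanged (presumably the freshly computed HI/LO half was intended).
 */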

/* Multiplication variants of the vr54xx. */
target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);

    return arg1;
}

target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);

    return arg1;
}

target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

#ifdef TARGET_MIPS64
void helper_dmult (target_ulong arg1, target_ulong arg2)
{
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}

void helper_dmultu (target_ulong arg1, target_ulong arg2)
{
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}
#endif

#ifndef CONFIG_USER_ONLY

static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
{
    target_phys_addr_t lladdr;

    lladdr = cpu_mips_translate_address(env, address, rw);

    if (lladdr == -1LL) {
        cpu_loop_exit();
    } else {
        return lladdr;
    }
}

#define HELPER_LD_ATOMIC(name, insn)                                          \
target_ulong helper_##name(target_ulong arg, int mem_idx)                     \
{                                                                             \
    env->lladdr = do_translate_address(arg, 0);                               \
    env->llval = do_##insn(arg, mem_idx);                                     \
    return env->llval;                                                        \
}
HELPER_LD_ATOMIC(ll, lw)
#ifdef TARGET_MIPS64
HELPER_LD_ATOMIC(lld, ld)
#endif
#undef HELPER_LD_ATOMIC

    
311
#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
312
target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
313
{                                                                             \
314
    target_long tmp;                                                          \
315
                                                                              \
316
    if (arg2 & almask) {                                                      \
317
        env->CP0_BadVAddr = arg2;                                             \
318
        helper_raise_exception(EXCP_AdES);                                    \
319
    }                                                                         \
320
    if (do_translate_address(arg2, 1) == env->lladdr) {                       \
321
        tmp = do_##ld_insn(arg2, mem_idx);                                    \
322
        if (tmp == env->llval) {                                              \
323
            do_##st_insn(arg2, arg1, mem_idx);                                \
324
            return 1;                                                         \
325
        }                                                                     \
326
    }                                                                         \
327
    return 0;                                                                 \
328
}
329
HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
330
#ifdef TARGET_MIPS64
331
HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
332
#endif
333
#undef HELPER_ST_ATOMIC
334
#endif
335

    
336
#ifdef TARGET_WORDS_BIGENDIAN
337
#define GET_LMASK(v) ((v) & 3)
338
#define GET_OFFSET(addr, offset) (addr + (offset))
339
#else
340
#define GET_LMASK(v) (((v) & 3) ^ 3)
341
#define GET_OFFSET(addr, offset) (addr - (offset))
342
#endif
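
/*
 * Unaligned access helpers (LWL/LWR, SWL/SWR and, further down, their
 * 64-bit LDL/LDR/SDL/SDR counterparts).  GET_LMASK() maps an address to
 * the position of the addressed byte counted from the most significant
 * end of its word, so the helpers below can share one code path for both
 * endiannesses; GET_OFFSET() then steps towards the less significant
 * bytes, i.e. upwards in memory on big-endian and downwards on
 * little-endian targets.
 */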

target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    target_ulong tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);

    if (GET_LMASK(arg2) <= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) <= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) == 0) {
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00) | tmp;
    }
    return (int32_t)arg1;
}

target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    target_ulong tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFF00) | tmp;

    if (GET_LMASK(arg2) >= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) >= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) == 3) {
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
    }
    return (int32_t)arg1;
}

void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK(arg2) <= 2)
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK(arg2) <= 1)
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK(arg2) == 0)
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
}

void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)arg1, mem_idx);

    if (GET_LMASK(arg2) >= 1)
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK(arg2) >= 2)
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK(arg2) == 3)
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
}

#if defined(TARGET_MIPS64)
/* "half" load and stores.  We must do the memory access inline,
   or fault handling won't work.  */

#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif

target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    uint64_t tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);

    if (GET_LMASK64(arg2) <= 6) {
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) <= 5) {
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) <= 4) {
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) <= 3) {
        tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) <= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) <= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    }

    if (GET_LMASK64(arg2) == 0) {
        tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
    }

    return arg1;
}

target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    uint64_t tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;

    if (GET_LMASK64(arg2) >= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp  << 8);
    }

    if (GET_LMASK64(arg2) >= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) >= 3) {
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) >= 4) {
        tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) >= 5) {
        tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) >= 6) {
        tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) == 7) {
        tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
        arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
    }

    return arg1;
}

void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);

    if (GET_LMASK64(arg2) <= 6)
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);

    if (GET_LMASK64(arg2) <= 5)
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);

    if (GET_LMASK64(arg2) <= 4)
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);

    if (GET_LMASK64(arg2) <= 3)
        do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK64(arg2) <= 2)
        do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK64(arg2) <= 1)
        do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK64(arg2) <= 0)
        do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
}

void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)arg1, mem_idx);

    if (GET_LMASK64(arg2) >= 1)
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK64(arg2) >= 2)
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK64(arg2) >= 3)
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK64(arg2) >= 4)
        do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);

    if (GET_LMASK64(arg2) >= 5)
        do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);

    if (GET_LMASK64(arg2) >= 6)
        do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);

    if (GET_LMASK64(arg2) == 7)
        do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
}
#endif /* TARGET_MIPS64 */

static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
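
/*
 * LWM/SWM (and LDM/SDM on MIPS64) register-list helpers: the low four bits
 * of reglist give how many of the registers listed in multiple_regs[]
 * (s0..s7 and fp, i.e. $16..$23 and $30) to transfer, and bit 4 requests
 * that ra ($31) be transferred from the final slot as well.
 */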

void helper_lwm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
#define ldfun ldl_raw
#else
    uint32_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldl_kernel; break;
    case 1: ldfun = ldl_super; break;
    default:
    case 2: ldfun = ldl_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = (target_long) ldfun(addr);
            addr += 4;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = (target_long) ldfun(addr);
    }
}

void helper_swm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
#define stfun stl_raw
#else
    void (*stfun)(target_ulong, uint32_t);

    switch (mem_idx)
    {
    case 0: stfun = stl_kernel; break;
    case 1: stfun = stl_super; break;
     default:
    case 2: stfun = stl_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 4;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}

#if defined(TARGET_MIPS64)
void helper_ldm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
#define ldfun ldq_raw
#else
    uint64_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldq_kernel; break;
    case 1: ldfun = ldq_super; break;
    default:
    case 2: ldfun = ldq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = ldfun(addr);
            addr += 8;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = ldfun(addr);
    }
}

void helper_sdm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
#define stfun stq_raw
#else
    void (*stfun)(target_ulong, uint64_t);

    switch (mem_idx)
    {
    case 0: stfun = stq_kernel; break;
    case 1: stfun = stq_super; break;
     default:
    case 2: stfun = stq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 8;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}
#endif

#ifndef CONFIG_USER_ONLY
/* CP0 helpers */
target_ulong helper_mfc0_mvpcontrol (void)
{
    return env->mvp->CP0_MVPControl;
}

target_ulong helper_mfc0_mvpconf0 (void)
{
    return env->mvp->CP0_MVPConf0;
}

target_ulong helper_mfc0_mvpconf1 (void)
{
    return env->mvp->CP0_MVPConf1;
}

target_ulong helper_mfc0_random (void)
{
    return (int32_t)cpu_mips_get_random(env);
}

target_ulong helper_mfc0_tcstatus (void)
{
    return env->active_tc.CP0_TCStatus;
}
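
/*
 * The mftc0/mttc0 helpers below all follow the same pattern: the target
 * thread context is taken from the TargTC field of CP0 VPEControl; when it
 * names the currently running TC the active_tc copy of the state is used,
 * otherwise the saved state in env->tcs[] is accessed directly.
 */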

target_ulong helper_mftc0_tcstatus(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCStatus;
    else
        return env->tcs[other_tc].CP0_TCStatus;
}

target_ulong helper_mfc0_tcbind (void)
{
    return env->active_tc.CP0_TCBind;
}

target_ulong helper_mftc0_tcbind(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCBind;
    else
        return env->tcs[other_tc].CP0_TCBind;
}

target_ulong helper_mfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

target_ulong helper_mftc0_tcrestart(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.PC;
    else
        return env->tcs[other_tc].PC;
}

target_ulong helper_mfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_mftc0_tchalt(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCHalt;
    else
        return env->tcs[other_tc].CP0_TCHalt;
}

target_ulong helper_mfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_mftc0_tccontext(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCContext;
    else
        return env->tcs[other_tc].CP0_TCContext;
}

target_ulong helper_mfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_mftc0_tcschedule(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCSchedule;
    else
        return env->tcs[other_tc].CP0_TCSchedule;
}

target_ulong helper_mfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_mftc0_tcschefback(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCScheFBack;
    else
        return env->tcs[other_tc].CP0_TCScheFBack;
}

target_ulong helper_mfc0_count (void)
{
    return (int32_t)cpu_mips_get_count(env);
}

target_ulong helper_mftc0_entryhi(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;

    if (other_tc == env->current_tc)
        tcstatus = env->active_tc.CP0_TCStatus;
    else
        tcstatus = env->tcs[other_tc].CP0_TCStatus;

    return (env->CP0_EntryHi & ~0xff) | (tcstatus & 0xff);
}

target_ulong helper_mftc0_status(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    target_ulong t0;
    int32_t tcstatus;

    if (other_tc == env->current_tc)
        tcstatus = env->active_tc.CP0_TCStatus;
    else
        tcstatus = env->tcs[other_tc].CP0_TCStatus;

    t0 = env->CP0_Status & ~0xf1000018;
    t0 |= tcstatus & (0xf << CP0TCSt_TCU0);
    t0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
    t0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);

    return t0;
}

target_ulong helper_mfc0_lladdr (void)
{
    return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
}

target_ulong helper_mfc0_watchlo (uint32_t sel)
{
    return (int32_t)env->CP0_WatchLo[sel];
}

target_ulong helper_mfc0_watchhi (uint32_t sel)
{
    return env->CP0_WatchHi[sel];
}

target_ulong helper_mfc0_debug (void)
{
    target_ulong t0 = env->CP0_Debug;
    if (env->hflags & MIPS_HFLAG_DM)
        t0 |= 1 << CP0DB_DM;

    return t0;
}

target_ulong helper_mftc0_debug(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;

    if (other_tc == env->current_tc)
        tcstatus = env->active_tc.CP0_Debug_tcstatus;
    else
        tcstatus = env->tcs[other_tc].CP0_Debug_tcstatus;

    /* XXX: Might be wrong, check with EJTAG spec. */
    return (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}

#if defined(TARGET_MIPS64)
target_ulong helper_dmfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

target_ulong helper_dmfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_dmfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_dmfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_dmfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_dmfc0_lladdr (void)
{
    return env->lladdr >> env->CP0_LLAddr_shift;
}

target_ulong helper_dmfc0_watchlo (uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}
#endif /* TARGET_MIPS64 */
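
/*
 * Writes to CP0 Index: the loop below computes the smallest power of two
 * greater than the number of TLB entries and uses it as a mask, so only
 * index bits that can address an implemented entry are kept; the
 * probe-failure bit (bit 31) is preserved as is.
 */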

void helper_mtc0_index (target_ulong arg1)
{
    int num = 1;
    unsigned int tmp = env->tlb->nb_tlb;

    do {
        tmp >>= 1;
        num <<= 1;
    } while (tmp);
    env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
}

void helper_mtc0_mvpcontrol (target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
                (1 << CP0MVPCo_EVP);
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0MVPCo_STLB);
    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);

    // TODO: Enable/disable shared TLB, enable/disable VPEs.

    env->mvp->CP0_MVPControl = newval;
}

void helper_mtc0_vpecontrol (target_ulong arg1)
{
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);

    /* Yield scheduler intercept not implemented. */
    /* Gating storage scheduler intercept not implemented. */

    // TODO: Enable/disable TCs.

    env->CP0_VPEControl = newval;
}

void helper_mtc0_vpeconf0 (target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
            mask |= (0xff << CP0VPEC0_XTC);
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    }
    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);

    // TODO: TC exclusive handling due to ERL/EXL.

    env->CP0_VPEConf0 = newval;
}

void helper_mtc0_vpeconf1 (target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
                (0xff << CP0VPEC1_NCP1);
    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);

    /* UDI not implemented. */
    /* CP2 not implemented. */

    // TODO: Handle FPU (CP1) binding.

    env->CP0_VPEConf1 = newval;
}

void helper_mtc0_yqmask (target_ulong arg1)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}

void helper_mtc0_vpeopt (target_ulong arg1)
{
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}

void helper_mtc0_entrylo0 (target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
}

void helper_mtc0_tcstatus (target_ulong arg1)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);

    // TODO: Sync with CP0_Status.

    env->active_tc.CP0_TCStatus = newval;
}

void helper_mttc0_tcstatus (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    // TODO: Sync with CP0_Status.

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCStatus = arg1;
    else
        env->tcs[other_tc].CP0_TCStatus = arg1;
}

void helper_mtc0_tcbind (target_ulong arg1)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
    env->active_tc.CP0_TCBind = newval;
}

void helper_mttc0_tcbind (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    if (other_tc == env->current_tc) {
        newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
        env->active_tc.CP0_TCBind = newval;
    } else {
        newval = (env->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
        env->tcs[other_tc].CP0_TCBind = newval;
    }
}

void helper_mtc0_tcrestart (target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->lladdr = 0ULL;
    /* MIPS16 not implemented. */
}

void helper_mttc0_tcrestart (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc) {
        env->active_tc.PC = arg1;
        env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        env->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    } else {
        env->tcs[other_tc].PC = arg1;
        env->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        env->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    }
}

void helper_mtc0_tchalt (target_ulong arg1)
{
    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    // TODO: Halt TC / Restart (if allocated+active) TC.
}

void helper_mttc0_tchalt (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    // TODO: Halt TC / Restart (if allocated+active) TC.

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCHalt = arg1;
    else
        env->tcs[other_tc].CP0_TCHalt = arg1;
}

void helper_mtc0_tccontext (target_ulong arg1)
{
    env->active_tc.CP0_TCContext = arg1;
}

void helper_mttc0_tccontext (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCContext = arg1;
    else
        env->tcs[other_tc].CP0_TCContext = arg1;
}

void helper_mtc0_tcschedule (target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}

void helper_mttc0_tcschedule (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCSchedule = arg1;
    else
        env->tcs[other_tc].CP0_TCSchedule = arg1;
}

void helper_mtc0_tcschefback (target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}

void helper_mttc0_tcschefback (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCScheFBack = arg1;
    else
        env->tcs[other_tc].CP0_TCScheFBack = arg1;
}

void helper_mtc0_entrylo1 (target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
}

void helper_mtc0_context (target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}

void helper_mtc0_pagemask (target_ulong arg1)
{
    /* 1k pages not implemented */
    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
}

void helper_mtc0_pagegrain (target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = 0;
}

void helper_mtc0_wired (target_ulong arg1)
{
    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
}

void helper_mtc0_srsconf0 (target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}

void helper_mtc0_srsconf1 (target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}

void helper_mtc0_srsconf2 (target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}

void helper_mtc0_srsconf3 (target_ulong arg1)
{
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}

void helper_mtc0_srsconf4 (target_ulong arg1)
{
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}

void helper_mtc0_hwrena (target_ulong arg1)
{
    env->CP0_HWREna = arg1 & 0x0000000F;
}

void helper_mtc0_count (target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}

void helper_mtc0_entryhi (target_ulong arg1)
{
    target_ulong old, val;

    /* 1k pages not implemented */
    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
#if defined(TARGET_MIPS64)
    val &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        uint32_t tcst = env->active_tc.CP0_TCStatus & ~0xff;
        env->active_tc.CP0_TCStatus = tcst | (val & 0xff);
    }
    /* If the ASID changes, flush qemu's TLB.  */
    if ((old & 0xFF) != (val & 0xFF))
        cpu_mips_tlb_flush(env, 1);
}

void helper_mttc0_entryhi(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;

    env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (arg1 & ~0xff);
    if (other_tc == env->current_tc) {
        tcstatus = (env->active_tc.CP0_TCStatus & ~0xff) | (arg1 & 0xff);
        env->active_tc.CP0_TCStatus = tcstatus;
    } else {
        tcstatus = (env->tcs[other_tc].CP0_TCStatus & ~0xff) | (arg1 & 0xff);
        env->tcs[other_tc].CP0_TCStatus = tcstatus;
    }
}

void helper_mtc0_compare (target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}

void helper_mtc0_status (target_ulong arg1)
{
    uint32_t val, old;
    uint32_t mask = env->CP0_Status_rw_bitmask;

    val = arg1 & mask;
    old = env->CP0_Status;
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
    compute_hflags(env);
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
                env->CP0_Cause);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
    cpu_mips_update_irq(env);
}

void helper_mttc0_status(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus = env->tcs[other_tc].CP0_TCStatus;

    env->CP0_Status = arg1 & ~0xf1000018;
    tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (arg1 & (0xf << CP0St_CU0));
    tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((arg1 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
    tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((arg1 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCStatus = tcstatus;
    else
        env->tcs[other_tc].CP0_TCStatus = tcstatus;
}

void helper_mtc0_intctl (target_ulong arg1)
{
    /* vectored interrupts not implemented, no performance counters. */
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (arg1 & 0x000002e0);
}

void helper_mtc0_srsctl (target_ulong arg1)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}

void helper_mtc0_cause (target_ulong arg1)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = env->CP0_Cause;

    if (env->insn_flags & ISA_MIPS32R2)
        mask |= 1 << CP0Ca_DC;

    env->CP0_Cause = (env->CP0_Cause & ~mask) | (arg1 & mask);

    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (env->CP0_Cause & (1 << CP0Ca_DC))
            cpu_mips_stop_count(env);
        else
            cpu_mips_start_count(env);
    }

    /* Handle the software interrupt as a hardware one, as they
       are very similar */
    if (arg1 & CP0Ca_IP_mask) {
        cpu_mips_update_irq(env);
    }
}

void helper_mtc0_ebase (target_ulong arg1)
{
    /* vectored interrupts not implemented */
    /* Multi-CPU not implemented */
    env->CP0_EBase = 0x80000000 | (arg1 & 0x3FFFF000);
}

void helper_mtc0_config0 (target_ulong arg1)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
}

void helper_mtc0_config2 (target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}

void helper_mtc0_lladdr (target_ulong arg1)
{
    target_long mask = env->CP0_LLAddr_rw_bitmask;
    arg1 = arg1 << env->CP0_LLAddr_shift;
    env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
}

void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
{
    /* Watch exceptions for instructions, data loads, data stores
       not implemented. */
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}

void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
{
    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}

void helper_mtc0_xcontext (target_ulong arg1)
{
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
}

void helper_mtc0_framemask (target_ulong arg1)
{
    env->CP0_Framemask = arg1; /* XXX */
}

void helper_mtc0_debug (target_ulong arg1)
{
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
    if (arg1 & (1 << CP0DB_DM))
        env->hflags |= MIPS_HFLAG_DM;
    else
        env->hflags &= ~MIPS_HFLAG_DM;
}

void helper_mttc0_debug(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (other_tc == env->current_tc)
        env->active_tc.CP0_Debug_tcstatus = val;
    else
        env->tcs[other_tc].CP0_Debug_tcstatus = val;
    env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}

void helper_mtc0_performance0 (target_ulong arg1)
{
    env->CP0_Performance0 = arg1 & 0x000007ff;
}

void helper_mtc0_taglo (target_ulong arg1)
{
    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
}

void helper_mtc0_datalo (target_ulong arg1)
{
    env->CP0_DataLo = arg1; /* XXX */
}

void helper_mtc0_taghi (target_ulong arg1)
{
    env->CP0_TagHi = arg1; /* XXX */
}

void helper_mtc0_datahi (target_ulong arg1)
{
    env->CP0_DataHi = arg1; /* XXX */
}

/* MIPS MT functions */
target_ulong helper_mftgpr(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.gpr[sel];
    else
        return env->tcs[other_tc].gpr[sel];
}

target_ulong helper_mftlo(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.LO[sel];
    else
        return env->tcs[other_tc].LO[sel];
}

target_ulong helper_mfthi(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.HI[sel];
    else
        return env->tcs[other_tc].HI[sel];
}

target_ulong helper_mftacx(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.ACX[sel];
    else
        return env->tcs[other_tc].ACX[sel];
}

target_ulong helper_mftdsp(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.DSPControl;
    else
        return env->tcs[other_tc].DSPControl;
}

void helper_mttgpr(target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.gpr[sel] = arg1;
    else
        env->tcs[other_tc].gpr[sel] = arg1;
}

void helper_mttlo(target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.LO[sel] = arg1;
    else
        env->tcs[other_tc].LO[sel] = arg1;
}

void helper_mtthi(target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.HI[sel] = arg1;
    else
        env->tcs[other_tc].HI[sel] = arg1;
}

void helper_mttacx(target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.ACX[sel] = arg1;
    else
        env->tcs[other_tc].ACX[sel] = arg1;
}

void helper_mttdsp(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.DSPControl = arg1;
    else
        env->tcs[other_tc].DSPControl = arg1;
}

/* MIPS MT functions */
target_ulong helper_dmt(target_ulong arg1)
{
    // TODO
    arg1 = 0;
    // rt = arg1

    return arg1;
}

target_ulong helper_emt(target_ulong arg1)
{
    // TODO
    arg1 = 0;
    // rt = arg1

    return arg1;
}

target_ulong helper_dvpe(target_ulong arg1)
{
    // TODO
    arg1 = 0;
    // rt = arg1

    return arg1;
}

target_ulong helper_evpe(target_ulong arg1)
{
    // TODO
    arg1 = 0;
    // rt = arg1

    return arg1;
}
#endif /* !CONFIG_USER_ONLY */

void helper_fork(target_ulong arg1, target_ulong arg2)
{
    // arg1 = rt, arg2 = rs
    arg1 = 0;
    // TODO: store to TC register
}
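
/*
 * Note: arg1 is a target_ulong and therefore unsigned, so the "arg1 < 0"
 * test below can never be true as written; a cast to a signed type would
 * be needed for negative YIELD operands to reach that branch.
 */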

target_ulong helper_yield(target_ulong arg1)
{
    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                helper_raise_exception(EXCP_THREAD);
            }
        }
    } else if (arg1 == 0) {
        if (0 /* TODO: TC underflow */) {
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            helper_raise_exception(EXCP_THREAD);
        } else {
            // TODO: Deallocate TC
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        helper_raise_exception(EXCP_THREAD);
    }
    return env->CP0_YQMask;
}

#ifndef CONFIG_USER_ONLY
/* TLB management */
static void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush (env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards.  */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}
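
/*
 * r4k_fill_tlb() below copies the current CP0 state into TLB entry idx:
 * the VPN and ASID come from EntryHi, the page size from PageMask, and
 * each half of the even/odd pair takes its V, D and C bits and its PFN
 * from EntryLo0/EntryLo1; the entry is global only when both EntryLo
 * registers have their G bit set.
 */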

static void r4k_fill_tlb (int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}

void r4k_helper_tlbwi (void)
{
    int idx;

    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;

    /* Discard cached TLB entries.  We could avoid doing this if the
       tlbwi is just upgrading access permissions on the current entry;
       that might be a further win.  */
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(idx);
}

void r4k_helper_tlbwr (void)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(r);
}

    
1704
void r4k_helper_tlbp (void)
1705
{
1706
    r4k_tlb_t *tlb;
1707
    target_ulong mask;
1708
    target_ulong tag;
1709
    target_ulong VPN;
1710
    uint8_t ASID;
1711
    int i;
1712

    
1713
    ASID = env->CP0_EntryHi & 0xFF;
1714
    for (i = 0; i < env->tlb->nb_tlb; i++) {
1715
        tlb = &env->tlb->mmu.r4k.tlb[i];
1716
        /* 1k pages are not supported. */
1717
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1718
        tag = env->CP0_EntryHi & ~mask;
1719
        VPN = tlb->VPN & ~mask;
1720
        /* Check ASID, virtual page number & size */
1721
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1722
            /* TLB match */
1723
            env->CP0_Index = i;
1724
            break;
1725
        }
1726
    }
1727
    if (i == env->tlb->nb_tlb) {
1728
        /* No match.  Discard any shadow entries, if any of them match.  */
1729
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
1730
            tlb = &env->tlb->mmu.r4k.tlb[i];
1731
            /* 1k pages are not supported. */
1732
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1733
            tag = env->CP0_EntryHi & ~mask;
1734
            VPN = tlb->VPN & ~mask;
1735
            /* Check ASID, virtual page number & size */
1736
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1737
                r4k_mips_tlb_flush_extra (env, i);
1738
                break;
1739
            }
1740
        }
1741

    
1742
        env->CP0_Index |= 0x80000000;
1743
    }
1744
}

void r4k_helper_tlbr (void)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;
    int idx;

    ASID = env->CP0_EntryHi & 0xFF;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    /* If this will change the current ASID, flush qemu's TLB.  */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}

void helper_tlbwi(void)
{
    env->tlb->helper_tlbwi();
}

void helper_tlbwr(void)
{
    env->tlb->helper_tlbwr();
}

void helper_tlbp(void)
{
    env->tlb->helper_tlbp();
}

void helper_tlbr(void)
{
    env->tlb->helper_tlbr();
}

/* Specials */
target_ulong helper_di (void)
{
    target_ulong t0 = env->CP0_Status;

    env->CP0_Status = t0 & ~(1 << CP0St_IE);
    cpu_mips_update_irq(env);

    return t0;
}

target_ulong helper_ei (void)
{
    target_ulong t0 = env->CP0_Status;

    env->CP0_Status = t0 | (1 << CP0St_IE);
    cpu_mips_update_irq(env);

    return t0;
}
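
/* DI and EI return the previous CP0_Status value, so a guest can save and
   restore the IE bit atomically.  Sketch of typical guest usage (not part of
   these helpers):
       di   t0              # t0 = old Status, Status.IE cleared
       ...  critical section ...
       mtc0 t0, $12         # restore previous Status, including IE
*/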

static void debug_pre_eret (void)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        qemu_log("\n");
    }
}

static void debug_post_eret (void)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}

static void set_pc (target_ulong error_pc)
{
    env->active_tc.PC = error_pc & ~(target_ulong)1;
    if (error_pc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    } else {
        env->hflags &= ~(MIPS_HFLAG_M16);
    }
}
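
/* Bit 0 of the restored PC encodes the compressed ISA mode (MIPS16e here):
   EPC/ErrorEPC/DEPC carry the ISA bit in their LSB, so set_pc() strips it
   from the fetch address and mirrors it into MIPS_HFLAG_M16. */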

void helper_eret (void)
{
    debug_pre_eret();
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        set_pc(env->CP0_ErrorEPC);
        env->CP0_Status &= ~(1 << CP0St_ERL);
    } else {
        set_pc(env->CP0_EPC);
        env->CP0_Status &= ~(1 << CP0St_EXL);
    }
    compute_hflags(env);
    debug_post_eret();
    env->lladdr = 1;
}
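
/* Setting lladdr to 1 cannot match any aligned LL address, so a pending
   LL/SC link is effectively broken across the exception return and a
   subsequent SC will fail, which is the architecturally safe behaviour. */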

void helper_deret (void)
{
    debug_pre_eret();
    set_pc(env->CP0_DEPC);

    /* Leave debug mode.  */
    env->hflags &= ~MIPS_HFLAG_DM;
    compute_hflags(env);
    debug_post_eret();
    env->lladdr = 1;
}
#endif /* !CONFIG_USER_ONLY */

target_ulong helper_rdhwr_cpunum(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 0)))
        return env->CP0_EBase & 0x3ff;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

target_ulong helper_rdhwr_synci_step(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 1)))
        return env->SYNCI_Step;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

target_ulong helper_rdhwr_cc(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 2)))
        return env->CP0_Count;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

target_ulong helper_rdhwr_ccres(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 3)))
        return env->CCRes;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}
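
/* All RDHWR helpers above follow the same access-control pattern: the
   register is readable either with CP0 access enabled (MIPS_HFLAG_CP0) or
   when the OS has set the corresponding CP0_HWREna bit; otherwise a Reserved
   Instruction exception is raised and the final return value is unused. */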

void helper_pmon (int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0)
            env->active_tc.gpr[2] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}

void helper_wait (void)
{
    env->halted = 1;
    helper_raise_exception(EXCP_HLT);
}

#if !defined(CONFIG_USER_ONLY)

static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);

#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (retaddr);
    helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}

void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int unused, int size)
{
    if (is_exec)
        helper_raise_exception(EXCP_IBE);
    else
        helper_raise_exception(EXCP_DBE);
}
#endif /* !CONFIG_USER_ONLY */

/* Complex FPU operations which may need stack space. */

#define FLOAT_ONE32 make_float32(0x3f8 << 20)
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
#define FLOAT_TWO32 make_float32(1 << 30)
#define FLOAT_TWO64 make_float64(1ULL << 62)
#define FLOAT_QNAN32 0x7fbfffff
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
#define FLOAT_SNAN32 0x7fffffff
#define FLOAT_SNAN64 0x7fffffffffffffffULL

/* convert MIPS rounding mode in FCR31 to IEEE library */
static unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};

#define RESTORE_ROUNDING_MODE \
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)

#define RESTORE_FLUSH_MODE \
    set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
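
/* FCR31 bits consumed by the two macros above: bits 1..0 hold the rounding
   mode (0 = nearest-even, 1 = toward zero, 2 = up, 3 = down, matching the
   order of the ieee_rm[] table) and bit 24 is the FS (flush-to-zero) bit. */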

target_ulong helper_cfc1 (uint32_t reg)
{
    target_ulong arg1;

    switch (reg) {
    case 0:
        arg1 = (int32_t)env->active_fpu.fcr0;
        break;
    case 25:
        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
        break;
    case 26:
        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
        break;
    case 28:
        arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
        break;
    default:
        arg1 = (int32_t)env->active_fpu.fcr31;
        break;
    }

    return arg1;
}

void helper_ctc1 (target_ulong arg1, uint32_t reg)
{
    switch(reg) {
    case 25:
        if (arg1 & 0xffffff00)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
                     ((arg1 & 0x1) << 23);
        break;
    case 26:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
        break;
    case 28:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
                     ((arg1 & 0x4) << 22);
        break;
    case 31:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = arg1;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    /* set flush-to-zero mode */
    RESTORE_FLUSH_MODE;
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
        helper_raise_exception(EXCP_FPE);
}
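
/* Control registers 25, 26 and 28 are the architectural FCCR, FEXR and FENR
   views of FCR31 (condition codes, cause/flags, enables/FS/RM respectively);
   helper_cfc1()/helper_ctc1() above just repack the corresponding FCR31
   fields into those layouts. */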

static inline char ieee_ex_to_mips(char xcpt)
{
    return (xcpt & float_flag_inexact) >> 5 |
           (xcpt & float_flag_underflow) >> 3 |
           (xcpt & float_flag_overflow) >> 1 |
           (xcpt & float_flag_divbyzero) << 1 |
           (xcpt & float_flag_invalid) << 4;
}

static inline char mips_ex_to_ieee(char xcpt)
{
    return (xcpt & FP_INEXACT) << 5 |
           (xcpt & FP_UNDERFLOW) << 3 |
           (xcpt & FP_OVERFLOW) << 1 |
           (xcpt & FP_DIV0) >> 1 |
           (xcpt & FP_INVALID) >> 4;
}

static inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));

    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
        helper_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
}
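
/* update_fcr31() implements the usual MIPS FP exception model: the softfloat
   flags raised by the last operation are copied into the FCR31 Cause field,
   an enabled exception traps immediately (EXCP_FPE), and otherwise the
   sticky Flags field accumulates them. */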

/* Float support.
   Single precision routines have a "s" suffix, double precision a
   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
   paired single lower "pl", paired single upper "pu".  */

/* unary operations, modifying fp status  */
uint64_t helper_float_sqrt_d(uint64_t fdt0)
{
    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
}

uint32_t helper_float_sqrt_s(uint32_t fst0)
{
    return float32_sqrt(fst0, &env->active_fpu.fp_status);
}

uint64_t helper_float_cvtd_s(uint32_t fst0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t helper_float_cvtd_w(uint32_t wt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t helper_float_cvtd_l(uint64_t dt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t helper_float_cvtl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_cvtl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_cvtps_pw(uint64_t dt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
{
    uint32_t wt2;
    uint32_t wth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        wt2 = FLOAT_SNAN32;
        wth2 = FLOAT_SNAN32;
    }
    return ((uint64_t)wth2 << 32) | wt2;
}

uint32_t helper_float_cvts_d(uint64_t fdt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t helper_float_cvts_w(uint32_t wt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t helper_float_cvts_l(uint64_t dt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t helper_float_cvts_pl(uint32_t wt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wt0;
    update_fcr31();
    return wt2;
}

uint32_t helper_float_cvts_pu(uint32_t wth0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wth0;
    update_fcr31();
    return wt2;
}

uint32_t helper_float_cvtw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_cvtw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t helper_float_roundl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_roundl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_roundw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_roundw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t helper_float_truncl_d(uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_truncl_s(uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_truncw_d(uint64_t fdt0)
{
    uint32_t wt2;

    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_truncw_s(uint32_t fst0)
{
    uint32_t wt2;

    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t helper_float_ceill_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_ceill_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_ceilw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_ceilw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t helper_float_floorl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_floorl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_floorw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_floorw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
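
/* The ROUND/TRUNC/CEIL/FLOOR helpers above all follow one pattern: force the
   required IEEE rounding mode (or use the *_round_to_zero softfloat entry
   points for TRUNC), convert, restore the mode from FCR31, and on overflow or
   invalid-operation return the architectural default result
   (FLOAT_SNAN32/FLOAT_SNAN64, i.e. 0x7fffffff / 0x7fffffffffffffff). */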

/* unary operations, not modifying fp status  */
#define FLOAT_UNOP(name)                                       \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)                \
{                                                              \
    return float64_ ## name(fdt0);                             \
}                                                              \
uint32_t helper_float_ ## name ## _s(uint32_t fst0)                \
{                                                              \
    return float32_ ## name(fst0);                             \
}                                                              \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)               \
{                                                              \
    uint32_t wt0;                                              \
    uint32_t wth0;                                             \
                                                               \
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                 \
    wth0 = float32_ ## name(fdt0 >> 32);                       \
    return ((uint64_t)wth0 << 32) | wt0;                       \
}
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP
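
/* abs and chs (negate) only clear or flip the sign bit, so they are exact,
   never raise IEEE exceptions, and deliberately skip update_fcr31(). */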

/* MIPS specific unary operations */
uint64_t helper_float_recip_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_recip_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t helper_float_rsqrt_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_rsqrt_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t helper_float_recip1_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_recip1_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t helper_float_recip1_ps(uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_rsqrt1_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)

/* binary operations */
#define FLOAT_BINOP(name)                                          \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)     \
{                                                                  \
    uint64_t dt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
        dt2 = FLOAT_QNAN64;                                        \
    return dt2;                                                    \
}                                                                  \
                                                                   \
uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)     \
{                                                                  \
    uint32_t wt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
        wt2 = FLOAT_QNAN32;                                        \
    return wt2;                                                    \
}                                                                  \
                                                                   \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)    \
{                                                                  \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
    uint32_t fsth0 = fdt0 >> 32;                                   \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
    uint32_t fsth1 = fdt1 >> 32;                                   \
    uint32_t wt2;                                                  \
    uint32_t wth2;                                                 \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);  \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {              \
        wt2 = FLOAT_QNAN32;                                        \
        wth2 = FLOAT_QNAN32;                                       \
    }                                                              \
    return ((uint64_t)wth2 << 32) | wt2;                           \
}

FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP

/* ternary operations */
#define FLOAT_TERNOP(name1, name2)                                        \
uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
}                                                                         \
                                                                          \
uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
}                                                                         \
                                                                          \
uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
                                            uint64_t fdt2)                \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}

FLOAT_TERNOP(mul, add)
FLOAT_TERNOP(mul, sub)
#undef FLOAT_TERNOP

/* negated ternary operations */
#define FLOAT_NTERNOP(name1, name2)                                       \
uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
    return float64_chs(fdt2);                                             \
}                                                                         \
                                                                          \
uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    return float32_chs(fst2);                                             \
}                                                                         \
                                                                          \
uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
                                           uint64_t fdt2)                 \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
    fst2 = float32_chs(fst2);                                             \
    fsth2 = float32_chs(fsth2);                                           \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}

FLOAT_NTERNOP(mul, add)
FLOAT_NTERNOP(mul, sub)
#undef FLOAT_NTERNOP

/* MIPS specific binary operations */
uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
    update_fcr31();
    return fst2;
}

uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
    update_fcr31();
    return fst2;
}

uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}
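
/* The MIPS-3D iteration helpers above compute the correction terms of a
   Newton-Raphson refinement: RECIP2 returns -(fs * ft - 1) and RSQRT2
   returns -(fs * ft - 1) / 2, which guest code combines with the RECIP1 /
   RSQRT1 seeds and multiply-add style operations to reach full precision. */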

uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
    uint32_t fsth1 = fdt1 >> 32;
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
    fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
    uint32_t fsth1 = fdt1 >> 32;
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
    fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

/* compare operations */
#define FOP_COND_D(op, cond)                                   \
void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                              \
    int c = cond;                                              \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                              \
    int c;                                                     \
    fdt0 = float64_abs(fdt0);                                  \
    fdt1 = float64_abs(fdt1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

static int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
{
    if (float64_is_signaling_nan(a) ||
        float64_is_signaling_nan(b) ||
        (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float64_is_nan(a) || float64_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}
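
/* Helper for the comparison predicates below: the "sig" argument selects the
   signaling variants (C.sf/C.seq/C.lt/...), which raise Invalid for any NaN
   operand, while the quiet variants only raise Invalid for signaling NaNs. */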

/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(f,   (float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(un,  float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(eq,  !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ueq, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(olt, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ult, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ole, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ule, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(sf,  (float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(ngle,float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(seq, !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngl, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(lt,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(nge, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(le,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngt, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))

#define FOP_COND_S(op, cond)                                   \
void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
{                                                              \
    int c = cond;                                              \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
{                                                              \
    int c;                                                     \
    fst0 = float32_abs(fst0);                                  \
    fst1 = float32_abs(fst1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

static flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
{
    if (float32_is_signaling_nan(a) ||
        float32_is_signaling_nan(b) ||
        (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float32_is_nan(a) || float32_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}

/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))

#define FOP_COND_PS(op, condl, condh)                           \
void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                               \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                          \
    uint32_t fsth0 = fdt0 >> 32;                                \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                          \
    uint32_t fsth1 = fdt1 >> 32;                                \
    int cl = condl;                                             \
    int ch = condh;                                             \
                                                                \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}                                                               \
void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                               \
    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
    int cl = condl;                                             \
    int ch = condh;                                             \
                                                                \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}

/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))