target-mips/op_helper.c @ 3a599383
/*
 *  MIPS emulation helpers for qemu.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include "exec.h"

#include "host-utils.h"

#include "helper.h"

#ifndef CONFIG_USER_ONLY
static inline void cpu_mips_tlb_flush (CPUState *env, int flush_global);
#endif

/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, int error_code)
{
#if 1
    if (exception < 0x100)
        qemu_log("%s: %d %d\n", __func__, exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void do_restore_state (void *pc_ptr)
{
    TranslationBlock *tb;
    unsigned long pc = (unsigned long) pc_ptr;

    tb = tb_find_pc (pc);
    if (tb) {
        cpu_restore_state (tb, env, pc, NULL);
    }
}
#endif
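
/* Memory access helpers.  The mem_idx argument selects the address space
   used for softmmu accesses: 0 = kernel, 1 = supervisor, 2 = user.  For
   user-only emulation there is a single flat address space, so the
   *_raw accessors are used instead.  */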

#if defined(CONFIG_USER_ONLY)
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(target_ulong addr, int mem_idx)            \
{                                                                       \
    return (type) insn##_raw(addr);                                     \
}
#else
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(target_ulong addr, int mem_idx)            \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: return (type) insn##_kernel(addr); break;                   \
    case 1: return (type) insn##_super(addr); break;                    \
    default:                                                            \
    case 2: return (type) insn##_user(addr); break;                     \
    }                                                                   \
}
#endif
HELPER_LD(lbu, ldub, uint8_t)
HELPER_LD(lw, ldl, int32_t)
#ifdef TARGET_MIPS64
HELPER_LD(ld, ldq, int64_t)
#endif
#undef HELPER_LD

#if defined(CONFIG_USER_ONLY)
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
{                                                                       \
    insn##_raw(addr, val);                                              \
}
#else
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: insn##_kernel(addr, val); break;                            \
    case 1: insn##_super(addr, val); break;                             \
    default:                                                            \
    case 2: insn##_user(addr, val); break;                              \
    }                                                                   \
}
#endif
HELPER_ST(sb, stb, uint8_t)
HELPER_ST(sw, stl, uint32_t)
#ifdef TARGET_MIPS64
HELPER_ST(sd, stq, uint64_t)
#endif
#undef HELPER_ST

target_ulong helper_clo (target_ulong arg1)
{
    return clo32(arg1);
}

target_ulong helper_clz (target_ulong arg1)
{
    return clz32(arg1);
}

#if defined(TARGET_MIPS64)
target_ulong helper_dclo (target_ulong arg1)
{
    return clo64(arg1);
}

target_ulong helper_dclz (target_ulong arg1)
{
    return clz64(arg1);
}
#endif /* TARGET_MIPS64 */

/* 64 bits arithmetic for 32 bits hosts */
static inline uint64_t get_HILO (void)
{
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
}

static inline void set_HILO (uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)HILO;
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}

static inline void set_HIT0_LO (target_ulong arg1, uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    arg1 = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}

static inline void set_HI_LOT0 (target_ulong arg1, uint64_t HILO)
{
    arg1 = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}

/* Multiplication variants of the vr54xx. */
target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);

    return arg1;
}

target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);

    return arg1;
}

target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

#ifdef TARGET_MIPS64
void helper_dmult (target_ulong arg1, target_ulong arg2)
{
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}

void helper_dmultu (target_ulong arg1, target_ulong arg2)
{
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}
#endif

#ifndef CONFIG_USER_ONLY

static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
{
    target_phys_addr_t lladdr;

    lladdr = cpu_mips_translate_address(env, address, rw);

    if (lladdr == -1LL) {
        cpu_loop_exit();
    } else {
        return lladdr;
    }
}
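
/* Linked-load / store-conditional emulation.  LL(D) records the translated
   physical address and the loaded value in env->lladdr / env->llval.  SC(D)
   succeeds only if the store address still translates to lladdr and the
   memory still holds llval; otherwise it returns 0 without storing.
   Misaligned addresses raise an address error exception.  */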

#define HELPER_LD_ATOMIC(name, insn)                                          \
target_ulong helper_##name(target_ulong arg, int mem_idx)                     \
{                                                                             \
    env->lladdr = do_translate_address(arg, 0);                               \
    env->llval = do_##insn(arg, mem_idx);                                     \
    return env->llval;                                                        \
}
HELPER_LD_ATOMIC(ll, lw)
#ifdef TARGET_MIPS64
HELPER_LD_ATOMIC(lld, ld)
#endif
#undef HELPER_LD_ATOMIC

#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
{                                                                             \
    target_long tmp;                                                          \
                                                                              \
    if (arg2 & almask) {                                                      \
        env->CP0_BadVAddr = arg2;                                             \
        helper_raise_exception(EXCP_AdES);                                    \
    }                                                                         \
    if (do_translate_address(arg2, 1) == env->lladdr) {                       \
        tmp = do_##ld_insn(arg2, mem_idx);                                    \
        if (tmp == env->llval) {                                              \
            do_##st_insn(arg2, arg1, mem_idx);                                \
            return 1;                                                         \
        }                                                                     \
    }                                                                         \
    return 0;                                                                 \
}
HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
#ifdef TARGET_MIPS64
HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
#endif
#undef HELPER_ST_ATOMIC
#endif
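
/* Unaligned load/store helpers (LWL/LWR, SWL/SWR and their 64-bit
   counterparts below).  Each instruction accesses only the bytes of the
   word or doubleword that lie on one side of the (possibly unaligned)
   address; GET_LMASK/GET_LMASK64 and GET_OFFSET hide the big/little-endian
   difference.  */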

#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif

target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    target_ulong tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);

    if (GET_LMASK(arg2) <= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) <= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) == 0) {
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00) | tmp;
    }
    return (int32_t)arg1;
}

target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    target_ulong tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFF00) | tmp;

    if (GET_LMASK(arg2) >= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) >= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) == 3) {
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
    }
    return (int32_t)arg1;
}

void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK(arg2) <= 2)
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK(arg2) <= 1)
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK(arg2) == 0)
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
}

void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)arg1, mem_idx);

    if (GET_LMASK(arg2) >= 1)
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK(arg2) >= 2)
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK(arg2) == 3)
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
}

#if defined(TARGET_MIPS64)
/* "half" load and stores.  We must do the memory access inline,
   or fault handling won't work.  */

#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif

target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    uint64_t tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);

    if (GET_LMASK64(arg2) <= 6) {
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) <= 5) {
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) <= 4) {
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) <= 3) {
        tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) <= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) <= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    }

    if (GET_LMASK64(arg2) == 0) {
        tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
    }

    return arg1;
}

target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    uint64_t tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;

    if (GET_LMASK64(arg2) >= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    }

    if (GET_LMASK64(arg2) >= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) >= 3) {
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) >= 4) {
        tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) >= 5) {
        tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) >= 6) {
        tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) == 7) {
        tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
        arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
    }

    return arg1;
}

void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);

    if (GET_LMASK64(arg2) <= 6)
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);

    if (GET_LMASK64(arg2) <= 5)
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);

    if (GET_LMASK64(arg2) <= 4)
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);

    if (GET_LMASK64(arg2) <= 3)
        do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK64(arg2) <= 2)
        do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK64(arg2) <= 1)
        do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK64(arg2) <= 0)
        do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
}

void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)arg1, mem_idx);

    if (GET_LMASK64(arg2) >= 1)
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK64(arg2) >= 2)
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK64(arg2) >= 3)
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK64(arg2) >= 4)
        do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);

    if (GET_LMASK64(arg2) >= 5)
        do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);

    if (GET_LMASK64(arg2) >= 6)
        do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);

    if (GET_LMASK64(arg2) == 7)
        do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
}
#endif /* TARGET_MIPS64 */
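
/* Load/store multiple helpers.  The low four bits of reglist give the number
   of registers to transfer from multiple_regs[] ($16/s0 .. $23/s7 and
   $30/fp); bit 4 additionally transfers $31/ra.  */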

static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };

void helper_lwm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
#define ldfun ldl_raw
#else
    uint32_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldl_kernel; break;
    case 1: ldfun = ldl_super; break;
    default:
    case 2: ldfun = ldl_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = (target_long) ldfun(addr);
            addr += 4;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = (target_long) ldfun(addr);
    }
}

void helper_swm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
#define stfun stl_raw
#else
    void (*stfun)(target_ulong, uint32_t);

    switch (mem_idx)
    {
    case 0: stfun = stl_kernel; break;
    case 1: stfun = stl_super; break;
    default:
    case 2: stfun = stl_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 4;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}

#if defined(TARGET_MIPS64)
void helper_ldm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
#define ldfun ldq_raw
#else
    uint64_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldq_kernel; break;
    case 1: ldfun = ldq_super; break;
    default:
    case 2: ldfun = ldq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = ldfun(addr);
            addr += 8;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = ldfun(addr);
    }
}

void helper_sdm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
#define stfun stq_raw
#else
    void (*stfun)(target_ulong, uint64_t);

    switch (mem_idx)
    {
    case 0: stfun = stq_kernel; break;
    case 1: stfun = stq_super; break;
    default:
    case 2: stfun = stq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 8;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}
#endif
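
/* CP0 helpers.  The mftc0/mttc0 variants access the registers of another
   thread context (TC): the target TC is selected via the TargTC field of
   CP0_VPEControl, and the active TC is used when that selection designates
   the currently running one.  */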

#ifndef CONFIG_USER_ONLY
/* CP0 helpers */
target_ulong helper_mfc0_mvpcontrol (void)
{
    return env->mvp->CP0_MVPControl;
}

target_ulong helper_mfc0_mvpconf0 (void)
{
    return env->mvp->CP0_MVPConf0;
}

target_ulong helper_mfc0_mvpconf1 (void)
{
    return env->mvp->CP0_MVPConf1;
}

target_ulong helper_mfc0_random (void)
{
    return (int32_t)cpu_mips_get_random(env);
}

target_ulong helper_mfc0_tcstatus (void)
{
    return env->active_tc.CP0_TCStatus;
}

target_ulong helper_mftc0_tcstatus(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCStatus;
    else
        return env->tcs[other_tc].CP0_TCStatus;
}

target_ulong helper_mfc0_tcbind (void)
{
    return env->active_tc.CP0_TCBind;
}

target_ulong helper_mftc0_tcbind(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCBind;
    else
        return env->tcs[other_tc].CP0_TCBind;
}

target_ulong helper_mfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

target_ulong helper_mftc0_tcrestart(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.PC;
    else
        return env->tcs[other_tc].PC;
}

target_ulong helper_mfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_mftc0_tchalt(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCHalt;
    else
        return env->tcs[other_tc].CP0_TCHalt;
}

target_ulong helper_mfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_mftc0_tccontext(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCContext;
    else
        return env->tcs[other_tc].CP0_TCContext;
}

target_ulong helper_mfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_mftc0_tcschedule(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCSchedule;
    else
        return env->tcs[other_tc].CP0_TCSchedule;
}

target_ulong helper_mfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_mftc0_tcschefback(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCScheFBack;
    else
        return env->tcs[other_tc].CP0_TCScheFBack;
}

target_ulong helper_mfc0_count (void)
{
    return (int32_t)cpu_mips_get_count(env);
}

target_ulong helper_mftc0_entryhi(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;

    if (other_tc == env->current_tc)
        tcstatus = env->active_tc.CP0_TCStatus;
    else
        tcstatus = env->tcs[other_tc].CP0_TCStatus;

    return (env->CP0_EntryHi & ~0xff) | (tcstatus & 0xff);
}

target_ulong helper_mftc0_status(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    target_ulong t0;
    int32_t tcstatus;

    if (other_tc == env->current_tc)
        tcstatus = env->active_tc.CP0_TCStatus;
    else
        tcstatus = env->tcs[other_tc].CP0_TCStatus;

    t0 = env->CP0_Status & ~0xf1000018;
    t0 |= tcstatus & (0xf << CP0TCSt_TCU0);
    t0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
    t0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);

    return t0;
}

target_ulong helper_mfc0_lladdr (void)
{
    return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
}

target_ulong helper_mfc0_watchlo (uint32_t sel)
{
    return (int32_t)env->CP0_WatchLo[sel];
}

target_ulong helper_mfc0_watchhi (uint32_t sel)
{
    return env->CP0_WatchHi[sel];
}

target_ulong helper_mfc0_debug (void)
{
    target_ulong t0 = env->CP0_Debug;
    if (env->hflags & MIPS_HFLAG_DM)
        t0 |= 1 << CP0DB_DM;

    return t0;
}

target_ulong helper_mftc0_debug(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;

    if (other_tc == env->current_tc)
        tcstatus = env->active_tc.CP0_Debug_tcstatus;
    else
        tcstatus = env->tcs[other_tc].CP0_Debug_tcstatus;

    /* XXX: Might be wrong, check with EJTAG spec. */
    return (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}

#if defined(TARGET_MIPS64)
target_ulong helper_dmfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

target_ulong helper_dmfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_dmfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_dmfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_dmfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_dmfc0_lladdr (void)
{
    return env->lladdr >> env->CP0_LLAddr_shift;
}

target_ulong helper_dmfc0_watchlo (uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}
#endif /* TARGET_MIPS64 */

void helper_mtc0_index (target_ulong arg1)
{
    int num = 1;
    unsigned int tmp = env->tlb->nb_tlb;

    do {
        tmp >>= 1;
        num <<= 1;
    } while (tmp);
    env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
}

void helper_mtc0_mvpcontrol (target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
                (1 << CP0MVPCo_EVP);
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0MVPCo_STLB);
    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);

    // TODO: Enable/disable shared TLB, enable/disable VPEs.

    env->mvp->CP0_MVPControl = newval;
}

void helper_mtc0_vpecontrol (target_ulong arg1)
{
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);

    /* Yield scheduler intercept not implemented. */
    /* Gating storage scheduler intercept not implemented. */

    // TODO: Enable/disable TCs.

    env->CP0_VPEControl = newval;
}

void helper_mtc0_vpeconf0 (target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
            mask |= (0xff << CP0VPEC0_XTC);
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    }
    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);

    // TODO: TC exclusive handling due to ERL/EXL.

    env->CP0_VPEConf0 = newval;
}

void helper_mtc0_vpeconf1 (target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
                (0xff << CP0VPEC1_NCP1);
    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);

    /* UDI not implemented. */
    /* CP2 not implemented. */

    // TODO: Handle FPU (CP1) binding.

    env->CP0_VPEConf1 = newval;
}

void helper_mtc0_yqmask (target_ulong arg1)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}

void helper_mtc0_vpeopt (target_ulong arg1)
{
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}

void helper_mtc0_entrylo0 (target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
}

void helper_mtc0_tcstatus (target_ulong arg1)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);

    // TODO: Sync with CP0_Status.

    env->active_tc.CP0_TCStatus = newval;
}

void helper_mttc0_tcstatus (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    // TODO: Sync with CP0_Status.

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCStatus = arg1;
    else
        env->tcs[other_tc].CP0_TCStatus = arg1;
}

void helper_mtc0_tcbind (target_ulong arg1)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
    env->active_tc.CP0_TCBind = newval;
}

void helper_mttc0_tcbind (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    if (other_tc == env->current_tc) {
        newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
        env->active_tc.CP0_TCBind = newval;
    } else {
        newval = (env->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
        env->tcs[other_tc].CP0_TCBind = newval;
    }
}

void helper_mtc0_tcrestart (target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->lladdr = 0ULL;
    /* MIPS16 not implemented. */
}

void helper_mttc0_tcrestart (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc) {
        env->active_tc.PC = arg1;
        env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        env->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    } else {
        env->tcs[other_tc].PC = arg1;
        env->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        env->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    }
}

void helper_mtc0_tchalt (target_ulong arg1)
{
    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    // TODO: Halt TC / Restart (if allocated+active) TC.
}

void helper_mttc0_tchalt (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    // TODO: Halt TC / Restart (if allocated+active) TC.

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCHalt = arg1;
    else
        env->tcs[other_tc].CP0_TCHalt = arg1;
}

void helper_mtc0_tccontext (target_ulong arg1)
{
    env->active_tc.CP0_TCContext = arg1;
}

void helper_mttc0_tccontext (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCContext = arg1;
    else
        env->tcs[other_tc].CP0_TCContext = arg1;
}

void helper_mtc0_tcschedule (target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}

void helper_mttc0_tcschedule (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCSchedule = arg1;
    else
        env->tcs[other_tc].CP0_TCSchedule = arg1;
}

void helper_mtc0_tcschefback (target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}

void helper_mttc0_tcschefback (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCScheFBack = arg1;
    else
        env->tcs[other_tc].CP0_TCScheFBack = arg1;
}

void helper_mtc0_entrylo1 (target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
}

void helper_mtc0_context (target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}

void helper_mtc0_pagemask (target_ulong arg1)
{
    /* 1k pages not implemented */
    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
}

void helper_mtc0_pagegrain (target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = 0;
}

void helper_mtc0_wired (target_ulong arg1)
{
    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
}

void helper_mtc0_srsconf0 (target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}

void helper_mtc0_srsconf1 (target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}

void helper_mtc0_srsconf2 (target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}

void helper_mtc0_srsconf3 (target_ulong arg1)
{
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}

void helper_mtc0_srsconf4 (target_ulong arg1)
{
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}

void helper_mtc0_hwrena (target_ulong arg1)
{
    env->CP0_HWREna = arg1 & 0x0000000F;
}

void helper_mtc0_count (target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}

void helper_mtc0_entryhi (target_ulong arg1)
{
    target_ulong old, val;

    /* 1k pages not implemented */
    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
#if defined(TARGET_MIPS64)
    val &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        uint32_t tcst = env->active_tc.CP0_TCStatus & ~0xff;
        env->active_tc.CP0_TCStatus = tcst | (val & 0xff);
    }
    /* If the ASID changes, flush qemu's TLB.  */
    if ((old & 0xFF) != (val & 0xFF))
        cpu_mips_tlb_flush(env, 1);
}

void helper_mttc0_entryhi(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;

    env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (arg1 & ~0xff);
    if (other_tc == env->current_tc) {
        tcstatus = (env->active_tc.CP0_TCStatus & ~0xff) | (arg1 & 0xff);
        env->active_tc.CP0_TCStatus = tcstatus;
    } else {
        tcstatus = (env->tcs[other_tc].CP0_TCStatus & ~0xff) | (arg1 & 0xff);
        env->tcs[other_tc].CP0_TCStatus = tcstatus;
    }
}

void helper_mtc0_compare (target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}

void helper_mtc0_status (target_ulong arg1)
{
    uint32_t val, old;
    uint32_t mask = env->CP0_Status_rw_bitmask;

    val = arg1 & mask;
    old = env->CP0_Status;
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
    compute_hflags(env);
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
                env->CP0_Cause);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}

void helper_mttc0_status(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus = env->tcs[other_tc].CP0_TCStatus;

    env->CP0_Status = arg1 & ~0xf1000018;
    tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (arg1 & (0xf << CP0St_CU0));
    tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((arg1 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
    tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((arg1 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCStatus = tcstatus;
    else
        env->tcs[other_tc].CP0_TCStatus = tcstatus;
}

void helper_mtc0_intctl (target_ulong arg1)
{
    /* vectored interrupts not implemented, no performance counters. */
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (arg1 & 0x000002e0);
}

void helper_mtc0_srsctl (target_ulong arg1)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}
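
/* Writes to CP0_Cause have side effects: toggling the DC bit (writable on
   MIPS32R2 and later) stops or restarts the Count register, and changes to
   the IP[1:0] bits raise or clear the corresponding software interrupts.  */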

void helper_mtc0_cause (target_ulong arg1)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = env->CP0_Cause;
    int i;

    if (env->insn_flags & ISA_MIPS32R2)
        mask |= 1 << CP0Ca_DC;

    env->CP0_Cause = (env->CP0_Cause & ~mask) | (arg1 & mask);

    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (env->CP0_Cause & (1 << CP0Ca_DC))
            cpu_mips_stop_count(env);
        else
            cpu_mips_start_count(env);
    }

    /* Set/reset software interrupts */
    for (i = 0 ; i < 2 ; i++) {
        if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}

void helper_mtc0_ebase (target_ulong arg1)
{
    /* vectored interrupts not implemented */
    env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
}

void helper_mtc0_config0 (target_ulong arg1)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
}

void helper_mtc0_config2 (target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}

void helper_mtc0_lladdr (target_ulong arg1)
{
    target_long mask = env->CP0_LLAddr_rw_bitmask;
    arg1 = arg1 << env->CP0_LLAddr_shift;
    env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
}

void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
{
    /* Watch exceptions for instructions, data loads, data stores
       not implemented. */
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}

void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
{
    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}

void helper_mtc0_xcontext (target_ulong arg1)
{
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
}

void helper_mtc0_framemask (target_ulong arg1)
{
    env->CP0_Framemask = arg1; /* XXX */
}

void helper_mtc0_debug (target_ulong arg1)
{
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
    if (arg1 & (1 << CP0DB_DM))
        env->hflags |= MIPS_HFLAG_DM;
    else
        env->hflags &= ~MIPS_HFLAG_DM;
}

void helper_mttc0_debug(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (other_tc == env->current_tc)
        env->active_tc.CP0_Debug_tcstatus = val;
    else
        env->tcs[other_tc].CP0_Debug_tcstatus = val;
    env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}

void helper_mtc0_performance0 (target_ulong arg1)
{
    env->CP0_Performance0 = arg1 & 0x000007ff;
}

void helper_mtc0_taglo (target_ulong arg1)
{
    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
}

void helper_mtc0_datalo (target_ulong arg1)
{
    env->CP0_DataLo = arg1; /* XXX */
}

void helper_mtc0_taghi (target_ulong arg1)
{
    env->CP0_TagHi = arg1; /* XXX */
}

void helper_mtc0_datahi (target_ulong arg1)
{
    env->CP0_DataHi = arg1; /* XXX */
}

/* MIPS MT functions */
target_ulong helper_mftgpr(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.gpr[sel];
    else
        return env->tcs[other_tc].gpr[sel];
}

target_ulong helper_mftlo(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.LO[sel];
    else
        return env->tcs[other_tc].LO[sel];
}

target_ulong helper_mfthi(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.HI[sel];
    else
        return env->tcs[other_tc].HI[sel];
}

target_ulong helper_mftacx(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.ACX[sel];
    else
        return env->tcs[other_tc].ACX[sel];
}

target_ulong helper_mftdsp(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.DSPControl;
    else
        return env->tcs[other_tc].DSPControl;
}

void helper_mttgpr(target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.gpr[sel] = arg1;
    else
        env->tcs[other_tc].gpr[sel] = arg1;
}

void helper_mttlo(target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.LO[sel] = arg1;
    else
        env->tcs[other_tc].LO[sel] = arg1;
}

void helper_mtthi(target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.HI[sel] = arg1;
    else
        env->tcs[other_tc].HI[sel] = arg1;
}

void helper_mttacx(target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.ACX[sel] = arg1;
    else
        env->tcs[other_tc].ACX[sel] = arg1;
}

void helper_mttdsp(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.DSPControl = arg1;
    else
        env->tcs[other_tc].DSPControl = arg1;
}

/* MIPS MT functions */
target_ulong helper_dmt(void)
{
    // TODO
    return 0;
}

target_ulong helper_emt(void)
{
    // TODO
    return 0;
}

target_ulong helper_dvpe(void)
{
    // TODO
    return 0;
}

target_ulong helper_evpe(void)
{
    // TODO
    return 0;
}
#endif /* !CONFIG_USER_ONLY */

void helper_fork(target_ulong arg1, target_ulong arg2)
{
    // arg1 = rt, arg2 = rs
    arg1 = 0;
    // TODO: store to TC register
}

target_ulong helper_yield(target_ulong arg)
{
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                helper_raise_exception(EXCP_THREAD);
            }
        }
    } else if (arg1 == 0) {
        if (0 /* TODO: TC underflow */) {
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            helper_raise_exception(EXCP_THREAD);
        } else {
            // TODO: Deallocate TC
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        helper_raise_exception(EXCP_THREAD);
    }
    return env->CP0_YQMask;
}
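
/* TLB management.  The r4k_* helpers implement TLBWI/TLBWR/TLBP/TLBR for the
   R4000-style MMU: entries are filled from the CP0 EntryHi/EntryLo0/EntryLo1/
   PageMask registers, and QEMU's own TLB is flushed whenever guest entries
   (or the current ASID) change underneath it.  */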

#ifndef CONFIG_USER_ONLY
/* TLB management */
static void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush (env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards.  */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}

static void r4k_fill_tlb (int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}

void r4k_helper_tlbwi (void)
{
    int idx;

    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;

    /* Discard cached TLB entries.  We could avoid doing this if the
       tlbwi is just upgrading access permissions on the current entry;
       that might be a further win.  */
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(idx);
}

void r4k_helper_tlbwr (void)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(r);
}

void r4k_helper_tlbp (void)
{
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint8_t ASID;
    int i;

    ASID = env->CP0_EntryHi & 0xFF;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match.  */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
            /* Check ASID, virtual page number & size */
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
                r4k_mips_tlb_flush_extra (env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}

void r4k_helper_tlbr (void)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;
    int idx;

    ASID = env->CP0_EntryHi & 0xFF;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    /* If this will change the current ASID, flush qemu's TLB.  */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}

void helper_tlbwi(void)
{
    env->tlb->helper_tlbwi();
}

void helper_tlbwr(void)
{
    env->tlb->helper_tlbwr();
}

void helper_tlbp(void)
{
    env->tlb->helper_tlbp();
}

void helper_tlbr(void)
{
    env->tlb->helper_tlbr();
}

/* Specials */
target_ulong helper_di (void)
{
    target_ulong t0 = env->CP0_Status;

    env->CP0_Status = t0 & ~(1 << CP0St_IE);
    return t0;
}

target_ulong helper_ei (void)
{
    target_ulong t0 = env->CP0_Status;

    env->CP0_Status = t0 | (1 << CP0St_IE);
    return t0;
}

static void debug_pre_eret (void)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        qemu_log("\n");
    }
}

static void debug_post_eret (void)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}
1815

    
1816
static void set_pc (target_ulong error_pc)
1817
{
1818
    env->active_tc.PC = error_pc & ~(target_ulong)1;
1819
    if (error_pc & 1) {
1820
        env->hflags |= MIPS_HFLAG_M16;
1821
    } else {
1822
        env->hflags &= ~(MIPS_HFLAG_M16);
1823
    }
1824
}
1825

    
1826
void helper_eret (void)
1827
{
1828
    debug_pre_eret();
1829
    if (env->CP0_Status & (1 << CP0St_ERL)) {
1830
        set_pc(env->CP0_ErrorEPC);
1831
        env->CP0_Status &= ~(1 << CP0St_ERL);
1832
    } else {
1833
        set_pc(env->CP0_EPC);
1834
        env->CP0_Status &= ~(1 << CP0St_EXL);
1835
    }
1836
    compute_hflags(env);
1837
    debug_post_eret();
1838
    env->lladdr = 1;
1839
}
1840

    
1841
void helper_deret (void)
1842
{
1843
    debug_pre_eret();
1844
    set_pc(env->CP0_DEPC);
1845

    
1846
    env->hflags &= MIPS_HFLAG_DM;
1847
    compute_hflags(env);
1848
    debug_post_eret();
1849
    env->lladdr = 1;
1850
}
1851
#endif /* !CONFIG_USER_ONLY */
1852

    
1853
target_ulong helper_rdhwr_cpunum(void)
1854
{
1855
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1856
        (env->CP0_HWREna & (1 << 0)))
1857
        return env->CP0_EBase & 0x3ff;
1858
    else
1859
        helper_raise_exception(EXCP_RI);
1860

    
1861
    return 0;
1862
}
1863

    
1864
target_ulong helper_rdhwr_synci_step(void)
1865
{
1866
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1867
        (env->CP0_HWREna & (1 << 1)))
1868
        return env->SYNCI_Step;
1869
    else
1870
        helper_raise_exception(EXCP_RI);
1871

    
1872
    return 0;
1873
}
1874

    
1875
target_ulong helper_rdhwr_cc(void)
1876
{
1877
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1878
        (env->CP0_HWREna & (1 << 2)))
1879
        return env->CP0_Count;
1880
    else
1881
        helper_raise_exception(EXCP_RI);
1882

    
1883
    return 0;
1884
}
1885

    
1886
target_ulong helper_rdhwr_ccres(void)
1887
{
1888
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1889
        (env->CP0_HWREna & (1 << 3)))
1890
        return env->CCRes;
1891
    else
1892
        helper_raise_exception(EXCP_RI);
1893

    
1894
    return 0;
1895
}
1896

    
1897
void helper_pmon (int function)
1898
{
1899
    function /= 2;
1900
    switch (function) {
1901
    case 2: /* TODO: char inbyte(int waitflag); */
1902
        if (env->active_tc.gpr[4] == 0)
1903
            env->active_tc.gpr[2] = -1;
1904
        /* Fall through */
1905
    case 11: /* TODO: char inbyte (void); */
1906
        env->active_tc.gpr[2] = -1;
1907
        break;
1908
    case 3:
1909
    case 12:
1910
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
1911
        break;
1912
    case 17:
1913
        break;
1914
    case 158:
1915
        {
1916
            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
1917
            printf("%s", fmt);
1918
        }
1919
        break;
1920
    }
1921
}
1922

    
1923
void helper_wait (void)
1924
{
1925
    env->halted = 1;
1926
    helper_raise_exception(EXCP_HLT);
1927
}
1928

    
1929
#if !defined(CONFIG_USER_ONLY)
1930

    
1931
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
1932

    
1933
#define MMUSUFFIX _mmu
1934
#define ALIGNED_ONLY
1935

    
1936
#define SHIFT 0
1937
#include "softmmu_template.h"
1938

    
1939
#define SHIFT 1
1940
#include "softmmu_template.h"
1941

    
1942
#define SHIFT 2
1943
#include "softmmu_template.h"
1944

    
1945
#define SHIFT 3
1946
#include "softmmu_template.h"
1947

    
1948
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
1949
{
1950
    env->CP0_BadVAddr = addr;
1951
    do_restore_state (retaddr);
1952
    helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
1953
}
1954

    
1955
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
1956
{
1957
    TranslationBlock *tb;
1958
    CPUState *saved_env;
1959
    unsigned long pc;
1960
    int ret;
1961

    
1962
    /* XXX: hack to restore env in all cases, even if not called from
1963
       generated code */
1964
    saved_env = env;
1965
    env = cpu_single_env;
1966
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
1967
    if (ret) {
1968
        if (retaddr) {
1969
            /* now we have a real cpu fault */
1970
            pc = (unsigned long)retaddr;
1971
            tb = tb_find_pc(pc);
1972
            if (tb) {
1973
                /* the PC is inside the translated code. It means that we have
1974
                   a virtual CPU fault */
1975
                cpu_restore_state(tb, env, pc, NULL);
1976
            }
1977
        }
1978
        helper_raise_exception_err(env->exception_index, env->error_code);
1979
    }
1980
    env = saved_env;
1981
}
1982

    
1983
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
1984
                          int unused, int size)
1985
{
1986
    if (is_exec)
1987
        helper_raise_exception(EXCP_IBE);
1988
    else
1989
        helper_raise_exception(EXCP_DBE);
1990
}
1991
#endif /* !CONFIG_USER_ONLY */
1992

    
1993
/* Complex FPU operations which may need stack space. */
1994

    
1995
#define FLOAT_ONE32 make_float32(0x3f8 << 20)
1996
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
1997
#define FLOAT_TWO32 make_float32(1 << 30)
1998
#define FLOAT_TWO64 make_float64(1ULL << 62)
1999
#define FLOAT_QNAN32 0x7fbfffff
2000
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2001
#define FLOAT_SNAN32 0x7fffffff
2002
#define FLOAT_SNAN64 0x7fffffffffffffffULL
2003

    
2004
/* convert MIPS rounding mode in FCR31 to IEEE library */
2005
static unsigned int ieee_rm[] = {
2006
    float_round_nearest_even,
2007
    float_round_to_zero,
2008
    float_round_up,
2009
    float_round_down
2010
};
2011

    
2012
#define RESTORE_ROUNDING_MODE \
2013
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2014

    
2015
#define RESTORE_FLUSH_MODE \
2016
    set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
2017

    
2018
target_ulong helper_cfc1 (uint32_t reg)
2019
{
2020
    target_ulong arg1;
2021

    
2022
    switch (reg) {
2023
    case 0:
2024
        arg1 = (int32_t)env->active_fpu.fcr0;
2025
        break;
2026
    case 25:
2027
        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
2028
        break;
2029
    case 26:
2030
        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
2031
        break;
2032
    case 28:
2033
        arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
2034
        break;
2035
    default:
2036
        arg1 = (int32_t)env->active_fpu.fcr31;
2037
        break;
2038
    }
2039

    
2040
    return arg1;
2041
}
2042

    
2043
void helper_ctc1 (target_ulong arg1, uint32_t reg)
2044
{
2045
    switch(reg) {
2046
    case 25:
2047
        if (arg1 & 0xffffff00)
2048
            return;
2049
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
2050
                     ((arg1 & 0x1) << 23);
2051
        break;
2052
    case 26:
2053
        if (arg1 & 0x007c0000)
2054
            return;
2055
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
2056
        break;
2057
    case 28:
2058
        if (arg1 & 0x007c0000)
2059
            return;
2060
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
2061
                     ((arg1 & 0x4) << 22);
2062
        break;
2063
    case 31:
2064
        if (arg1 & 0x007c0000)
2065
            return;
2066
        env->active_fpu.fcr31 = arg1;
2067
        break;
2068
    default:
2069
        return;
2070
    }
2071
    /* set rounding mode */
2072
    RESTORE_ROUNDING_MODE;
2073
    /* set flush-to-zero mode */
2074
    RESTORE_FLUSH_MODE;
2075
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2076
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
2077
        helper_raise_exception(EXCP_FPE);
2078
}
2079

    
2080
static inline char ieee_ex_to_mips(char xcpt)
2081
{
2082
    return (xcpt & float_flag_inexact) >> 5 |
2083
           (xcpt & float_flag_underflow) >> 3 |
2084
           (xcpt & float_flag_overflow) >> 1 |
2085
           (xcpt & float_flag_divbyzero) << 1 |
2086
           (xcpt & float_flag_invalid) << 4;
2087
}
2088

    
2089
static inline char mips_ex_to_ieee(char xcpt)
2090
{
2091
    return (xcpt & FP_INEXACT) << 5 |
2092
           (xcpt & FP_UNDERFLOW) << 3 |
2093
           (xcpt & FP_OVERFLOW) << 1 |
2094
           (xcpt & FP_DIV0) >> 1 |
2095
           (xcpt & FP_INVALID) >> 4;
2096
}
2097

    
2098
static inline void update_fcr31(void)
2099
{
2100
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));
2101

    
2102
    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
2103
    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
2104
        helper_raise_exception(EXCP_FPE);
2105
    else
2106
        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
2107
}
2108

    
2109
/* Float support.
2110
   Single precition routines have a "s" suffix, double precision a
2111
   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2112
   paired single lower "pl", paired single upper "pu".  */
2113

    
2114
/* unary operations, modifying fp status  */
2115
uint64_t helper_float_sqrt_d(uint64_t fdt0)
2116
{
2117
    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
2118
}
2119

    
2120
uint32_t helper_float_sqrt_s(uint32_t fst0)
2121
{
2122
    return float32_sqrt(fst0, &env->active_fpu.fp_status);
2123
}
2124

    
2125
uint64_t helper_float_cvtd_s(uint32_t fst0)
2126
{
2127
    uint64_t fdt2;
2128

    
2129
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2130
    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
2131
    update_fcr31();
2132
    return fdt2;
2133
}
2134

    
2135
uint64_t helper_float_cvtd_w(uint32_t wt0)
2136
{
2137
    uint64_t fdt2;
2138

    
2139
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2140
    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
2141
    update_fcr31();
2142
    return fdt2;
2143
}
2144

    
2145
uint64_t helper_float_cvtd_l(uint64_t dt0)
2146
{
2147
    uint64_t fdt2;
2148

    
2149
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2150
    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
2151
    update_fcr31();
2152
    return fdt2;
2153
}
2154

    
2155
uint64_t helper_float_cvtl_d(uint64_t fdt0)
2156
{
2157
    uint64_t dt2;
2158

    
2159
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2160
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2161
    update_fcr31();
2162
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2163
        dt2 = FLOAT_SNAN64;
2164
    return dt2;
2165
}
2166

    
2167
uint64_t helper_float_cvtl_s(uint32_t fst0)
2168
{
2169
    uint64_t dt2;
2170

    
2171
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2172
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2173
    update_fcr31();
2174
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2175
        dt2 = FLOAT_SNAN64;
2176
    return dt2;
2177
}
2178

    
2179
uint64_t helper_float_cvtps_pw(uint64_t dt0)
2180
{
2181
    uint32_t fst2;
2182
    uint32_t fsth2;
2183

    
2184
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2185
    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2186
    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
2187
    update_fcr31();
2188
    return ((uint64_t)fsth2 << 32) | fst2;
2189
}
2190

    
2191
uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
2192
{
2193
    uint32_t wt2;
2194
    uint32_t wth2;
2195

    
2196
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2197
    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2198
    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
2199
    update_fcr31();
2200
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
2201
        wt2 = FLOAT_SNAN32;
2202
        wth2 = FLOAT_SNAN32;
2203
    }
2204
    return ((uint64_t)wth2 << 32) | wt2;
2205
}
2206

    
2207
uint32_t helper_float_cvts_d(uint64_t fdt0)
2208
{
2209
    uint32_t fst2;
2210

    
2211
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2212
    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
2213
    update_fcr31();
2214
    return fst2;
2215
}
2216

    
2217
uint32_t helper_float_cvts_w(uint32_t wt0)
2218
{
2219
    uint32_t fst2;
2220

    
2221
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2222
    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
2223
    update_fcr31();
2224
    return fst2;
2225
}
2226

    
2227
uint32_t helper_float_cvts_l(uint64_t dt0)
2228
{
2229
    uint32_t fst2;
2230

    
2231
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2232
    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
2233
    update_fcr31();
2234
    return fst2;
2235
}
2236

    
2237
uint32_t helper_float_cvts_pl(uint32_t wt0)
2238
{
2239
    uint32_t wt2;
2240

    
2241
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2242
    wt2 = wt0;
2243
    update_fcr31();
2244
    return wt2;
2245
}
2246

    
2247
uint32_t helper_float_cvts_pu(uint32_t wth0)
2248
{
2249
    uint32_t wt2;
2250

    
2251
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2252
    wt2 = wth0;
2253
    update_fcr31();
2254
    return wt2;
2255
}
2256

    
2257
uint32_t helper_float_cvtw_s(uint32_t fst0)
2258
{
2259
    uint32_t wt2;
2260

    
2261
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2262
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2263
    update_fcr31();
2264
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2265
        wt2 = FLOAT_SNAN32;
2266
    return wt2;
2267
}
2268

    
2269
uint32_t helper_float_cvtw_d(uint64_t fdt0)
2270
{
2271
    uint32_t wt2;
2272

    
2273
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2274
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2275
    update_fcr31();
2276
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2277
        wt2 = FLOAT_SNAN32;
2278
    return wt2;
2279
}
2280

    
2281
uint64_t helper_float_roundl_d(uint64_t fdt0)
2282
{
2283
    uint64_t dt2;
2284

    
2285
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2286
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2287
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2288
    RESTORE_ROUNDING_MODE;
2289
    update_fcr31();
2290
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2291
        dt2 = FLOAT_SNAN64;
2292
    return dt2;
2293
}
2294

    
2295
uint64_t helper_float_roundl_s(uint32_t fst0)
2296
{
2297
    uint64_t dt2;
2298

    
2299
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2300
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2301
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2302
    RESTORE_ROUNDING_MODE;
2303
    update_fcr31();
2304
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2305
        dt2 = FLOAT_SNAN64;
2306
    return dt2;
2307
}
2308

    
2309
uint32_t helper_float_roundw_d(uint64_t fdt0)
2310
{
2311
    uint32_t wt2;
2312

    
2313
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2314
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2315
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2316
    RESTORE_ROUNDING_MODE;
2317
    update_fcr31();
2318
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2319
        wt2 = FLOAT_SNAN32;
2320
    return wt2;
2321
}
2322

    
2323
uint32_t helper_float_roundw_s(uint32_t fst0)
2324
{
2325
    uint32_t wt2;
2326

    
2327
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2328
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2329
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2330
    RESTORE_ROUNDING_MODE;
2331
    update_fcr31();
2332
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2333
        wt2 = FLOAT_SNAN32;
2334
    return wt2;
2335
}
2336

    
2337
uint64_t helper_float_truncl_d(uint64_t fdt0)
2338
{
2339
    uint64_t dt2;
2340

    
2341
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2342
    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
2343
    update_fcr31();
2344
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2345
        dt2 = FLOAT_SNAN64;
2346
    return dt2;
2347
}
2348

    
2349
uint64_t helper_float_truncl_s(uint32_t fst0)
2350
{
2351
    uint64_t dt2;
2352

    
2353
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2354
    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
2355
    update_fcr31();
2356
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2357
        dt2 = FLOAT_SNAN64;
2358
    return dt2;
2359
}
2360

    
2361
uint32_t helper_float_truncw_d(uint64_t fdt0)
2362
{
2363
    uint32_t wt2;
2364

    
2365
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2366
    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
2367
    update_fcr31();
2368
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2369
        wt2 = FLOAT_SNAN32;
2370
    return wt2;
2371
}
2372

    
2373
uint32_t helper_float_truncw_s(uint32_t fst0)
2374
{
2375
    uint32_t wt2;
2376

    
2377
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2378
    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
2379
    update_fcr31();
2380
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2381
        wt2 = FLOAT_SNAN32;
2382
    return wt2;
2383
}
2384

    
2385
uint64_t helper_float_ceill_d(uint64_t fdt0)
2386
{
2387
    uint64_t dt2;
2388

    
2389
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2390
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2391
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2392
    RESTORE_ROUNDING_MODE;
2393
    update_fcr31();
2394
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2395
        dt2 = FLOAT_SNAN64;
2396
    return dt2;
2397
}
2398

    
2399
uint64_t helper_float_ceill_s(uint32_t fst0)
2400
{
2401
    uint64_t dt2;
2402

    
2403
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2404
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2405
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2406
    RESTORE_ROUNDING_MODE;
2407
    update_fcr31();
2408
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2409
        dt2 = FLOAT_SNAN64;
2410
    return dt2;
2411
}
2412

    
2413
uint32_t helper_float_ceilw_d(uint64_t fdt0)
2414
{
2415
    uint32_t wt2;
2416

    
2417
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2418
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2419
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2420
    RESTORE_ROUNDING_MODE;
2421
    update_fcr31();
2422
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2423
        wt2 = FLOAT_SNAN32;
2424
    return wt2;
2425
}
2426

    
2427
uint32_t helper_float_ceilw_s(uint32_t fst0)
2428
{
2429
    uint32_t wt2;
2430

    
2431
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2432
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2433
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2434
    RESTORE_ROUNDING_MODE;
2435
    update_fcr31();
2436
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2437
        wt2 = FLOAT_SNAN32;
2438
    return wt2;
2439
}
2440

    
2441
uint64_t helper_float_floorl_d(uint64_t fdt0)
2442
{
2443
    uint64_t dt2;
2444

    
2445
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2446
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2447
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2448
    RESTORE_ROUNDING_MODE;
2449
    update_fcr31();
2450
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2451
        dt2 = FLOAT_SNAN64;
2452
    return dt2;
2453
}
2454

    
2455
uint64_t helper_float_floorl_s(uint32_t fst0)
2456
{
2457
    uint64_t dt2;
2458

    
2459
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2460
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2461
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2462
    RESTORE_ROUNDING_MODE;
2463
    update_fcr31();
2464
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2465
        dt2 = FLOAT_SNAN64;
2466
    return dt2;
2467
}
2468

    
2469
uint32_t helper_float_floorw_d(uint64_t fdt0)
2470
{
2471
    uint32_t wt2;
2472

    
2473
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2474
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2475
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2476
    RESTORE_ROUNDING_MODE;
2477
    update_fcr31();
2478
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2479
        wt2 = FLOAT_SNAN32;
2480
    return wt2;
2481
}
2482

    
2483
uint32_t helper_float_floorw_s(uint32_t fst0)
2484
{
2485
    uint32_t wt2;
2486

    
2487
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2488
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2489
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2490
    RESTORE_ROUNDING_MODE;
2491
    update_fcr31();
2492
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2493
        wt2 = FLOAT_SNAN32;
2494
    return wt2;
2495
}
2496

    
2497
/* unary operations, not modifying fp status  */
2498
#define FLOAT_UNOP(name)                                       \
2499
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)                \
2500
{                                                              \
2501
    return float64_ ## name(fdt0);                             \
2502
}                                                              \
2503
uint32_t helper_float_ ## name ## _s(uint32_t fst0)                \
2504
{                                                              \
2505
    return float32_ ## name(fst0);                             \
2506
}                                                              \
2507
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)               \
2508
{                                                              \
2509
    uint32_t wt0;                                              \
2510
    uint32_t wth0;                                             \
2511
                                                               \
2512
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                 \
2513
    wth0 = float32_ ## name(fdt0 >> 32);                       \
2514
    return ((uint64_t)wth0 << 32) | wt0;                       \
2515
}
2516
FLOAT_UNOP(abs)
2517
FLOAT_UNOP(chs)
2518
#undef FLOAT_UNOP
2519

    
2520
/* MIPS specific unary operations */
2521
uint64_t helper_float_recip_d(uint64_t fdt0)
2522
{
2523
    uint64_t fdt2;
2524

    
2525
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2526
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2527
    update_fcr31();
2528
    return fdt2;
2529
}
2530

    
2531
uint32_t helper_float_recip_s(uint32_t fst0)
2532
{
2533
    uint32_t fst2;
2534

    
2535
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2536
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2537
    update_fcr31();
2538
    return fst2;
2539
}
2540

    
2541
uint64_t helper_float_rsqrt_d(uint64_t fdt0)
2542
{
2543
    uint64_t fdt2;
2544

    
2545
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2546
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2547
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2548
    update_fcr31();
2549
    return fdt2;
2550
}
2551

    
2552
uint32_t helper_float_rsqrt_s(uint32_t fst0)
2553
{
2554
    uint32_t fst2;
2555

    
2556
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2557
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2558
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2559
    update_fcr31();
2560
    return fst2;
2561
}
2562

    
2563
uint64_t helper_float_recip1_d(uint64_t fdt0)
2564
{
2565
    uint64_t fdt2;
2566

    
2567
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2568
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2569
    update_fcr31();
2570
    return fdt2;
2571
}
2572

    
2573
uint32_t helper_float_recip1_s(uint32_t fst0)
2574
{
2575
    uint32_t fst2;
2576

    
2577
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2578
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2579
    update_fcr31();
2580
    return fst2;
2581
}
2582

    
2583
uint64_t helper_float_recip1_ps(uint64_t fdt0)
2584
{
2585
    uint32_t fst2;
2586
    uint32_t fsth2;
2587

    
2588
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2589
    fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2590
    fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
2591
    update_fcr31();
2592
    return ((uint64_t)fsth2 << 32) | fst2;
2593
}
2594

    
2595
uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
2596
{
2597
    uint64_t fdt2;
2598

    
2599
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2600
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2601
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2602
    update_fcr31();
2603
    return fdt2;
2604
}
2605

    
2606
uint32_t helper_float_rsqrt1_s(uint32_t fst0)
2607
{
2608
    uint32_t fst2;
2609

    
2610
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2611
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2612
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2613
    update_fcr31();
2614
    return fst2;
2615
}
2616

    
2617
uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
2618
{
2619
    uint32_t fst2;
2620
    uint32_t fsth2;
2621

    
2622
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2623
    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2624
    fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
2625
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2626
    fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
2627
    update_fcr31();
2628
    return ((uint64_t)fsth2 << 32) | fst2;
2629
}
2630

    
2631
#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
2632

    
2633
/* binary operations */
2634
#define FLOAT_BINOP(name)                                          \
2635
uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)     \
2636
{                                                                  \
2637
    uint64_t dt2;                                                  \
2638
                                                                   \
2639
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
2640
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);     \
2641
    update_fcr31();                                                \
2642
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
2643
        dt2 = FLOAT_QNAN64;                                        \
2644
    return dt2;                                                    \
2645
}                                                                  \
2646
                                                                   \
2647
uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)     \
2648
{                                                                  \
2649
    uint32_t wt2;                                                  \
2650
                                                                   \
2651
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
2652
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
2653
    update_fcr31();                                                \
2654
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
2655
        wt2 = FLOAT_QNAN32;                                        \
2656
    return wt2;                                                    \
2657
}                                                                  \
2658
                                                                   \
2659
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)    \
2660
{                                                                  \
2661
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
2662
    uint32_t fsth0 = fdt0 >> 32;                                   \
2663
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
2664
    uint32_t fsth1 = fdt1 >> 32;                                   \
2665
    uint32_t wt2;                                                  \
2666
    uint32_t wth2;                                                 \
2667
                                                                   \
2668
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
2669
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
2670
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);  \
2671
    update_fcr31();                                                \
2672
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {              \
2673
        wt2 = FLOAT_QNAN32;                                        \
2674
        wth2 = FLOAT_QNAN32;                                       \
2675
    }                                                              \
2676
    return ((uint64_t)wth2 << 32) | wt2;                           \
2677
}
2678

    
2679
FLOAT_BINOP(add)
2680
FLOAT_BINOP(sub)
2681
FLOAT_BINOP(mul)
2682
FLOAT_BINOP(div)
2683
#undef FLOAT_BINOP
2684

    
2685
/* ternary operations */
2686
#define FLOAT_TERNOP(name1, name2)                                        \
2687
uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
2688
                                           uint64_t fdt2)                 \
2689
{                                                                         \
2690
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
2691
    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
2692
}                                                                         \
2693
                                                                          \
2694
uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
2695
                                           uint32_t fst2)                 \
2696
{                                                                         \
2697
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2698
    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2699
}                                                                         \
2700
                                                                          \
2701
uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
2702
                                            uint64_t fdt2)                \
2703
{                                                                         \
2704
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
2705
    uint32_t fsth0 = fdt0 >> 32;                                          \
2706
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
2707
    uint32_t fsth1 = fdt1 >> 32;                                          \
2708
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
2709
    uint32_t fsth2 = fdt2 >> 32;                                          \
2710
                                                                          \
2711
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2712
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
2713
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2714
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
2715
    return ((uint64_t)fsth2 << 32) | fst2;                                \
2716
}
2717

    
2718
FLOAT_TERNOP(mul, add)
2719
FLOAT_TERNOP(mul, sub)
2720
#undef FLOAT_TERNOP
2721

    
2722
/* negated ternary operations */
2723
#define FLOAT_NTERNOP(name1, name2)                                       \
2724
uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
2725
                                           uint64_t fdt2)                 \
2726
{                                                                         \
2727
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
2728
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
2729
    return float64_chs(fdt2);                                             \
2730
}                                                                         \
2731
                                                                          \
2732
uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
2733
                                           uint32_t fst2)                 \
2734
{                                                                         \
2735
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2736
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2737
    return float32_chs(fst2);                                             \
2738
}                                                                         \
2739
                                                                          \
2740
uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
2741
                                           uint64_t fdt2)                 \
2742
{                                                                         \
2743
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
2744
    uint32_t fsth0 = fdt0 >> 32;                                          \
2745
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
2746
    uint32_t fsth1 = fdt1 >> 32;                                          \
2747
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
2748
    uint32_t fsth2 = fdt2 >> 32;                                          \
2749
                                                                          \
2750
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
2751
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
2752
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
2753
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
2754
    fst2 = float32_chs(fst2);                                             \
2755
    fsth2 = float32_chs(fsth2);                                           \
2756
    return ((uint64_t)fsth2 << 32) | fst2;                                \
2757
}
2758

    
2759
FLOAT_NTERNOP(mul, add)
2760
FLOAT_NTERNOP(mul, sub)
2761
#undef FLOAT_NTERNOP
2762

    
2763
/* MIPS specific binary operations */
2764
uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
2765
{
2766
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2767
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2768
    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
2769
    update_fcr31();
2770
    return fdt2;
2771
}
2772

    
2773
uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
2774
{
2775
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2776
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2777
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2778
    update_fcr31();
2779
    return fst2;
2780
}
2781

    
2782
uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
2783
{
2784
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2785
    uint32_t fsth0 = fdt0 >> 32;
2786
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
2787
    uint32_t fsth2 = fdt2 >> 32;
2788

    
2789
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2790
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2791
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
2792
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2793
    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
2794
    update_fcr31();
2795
    return ((uint64_t)fsth2 << 32) | fst2;
2796
}
2797

    
2798
uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
2799
{
2800
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2801
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2802
    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
2803
    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
2804
    update_fcr31();
2805
    return fdt2;
2806
}
2807

    
2808
uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
2809
{
2810
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2811
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2812
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
2813
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
2814
    update_fcr31();
2815
    return fst2;
2816
}
2817

    
2818
uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
2819
{
2820
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2821
    uint32_t fsth0 = fdt0 >> 32;
2822
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
2823
    uint32_t fsth2 = fdt2 >> 32;
2824

    
2825
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2826
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2827
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
2828
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
2829
    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
2830
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
2831
    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
2832
    update_fcr31();
2833
    return ((uint64_t)fsth2 << 32) | fst2;
2834
}
2835

    
2836
uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
2837
{
2838
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2839
    uint32_t fsth0 = fdt0 >> 32;
2840
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
2841
    uint32_t fsth1 = fdt1 >> 32;
2842
    uint32_t fst2;
2843
    uint32_t fsth2;
2844

    
2845
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2846
    fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
2847
    fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
2848
    update_fcr31();
2849
    return ((uint64_t)fsth2 << 32) | fst2;
2850
}
2851

    
2852
uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
2853
{
2854
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2855
    uint32_t fsth0 = fdt0 >> 32;
2856
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
2857
    uint32_t fsth1 = fdt1 >> 32;
2858
    uint32_t fst2;
2859
    uint32_t fsth2;
2860

    
2861
    set_float_exception_flags(0, &env->active_fpu.fp_status);
2862
    fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
2863
    fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
2864
    update_fcr31();
2865
    return ((uint64_t)fsth2 << 32) | fst2;
2866
}
2867

    
2868
/* compare operations */
2869
#define FOP_COND_D(op, cond)                                   \
2870
void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
2871
{                                                              \
2872
    int c = cond;                                              \
2873
    update_fcr31();                                            \
2874
    if (c)                                                     \
2875
        SET_FP_COND(cc, env->active_fpu);                      \
2876
    else                                                       \
2877
        CLEAR_FP_COND(cc, env->active_fpu);                    \
2878
}                                                              \
2879
void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
2880
{                                                              \
2881
    int c;                                                     \
2882
    fdt0 = float64_abs(fdt0);                                  \
2883
    fdt1 = float64_abs(fdt1);                                  \
2884
    c = cond;                                                  \
2885
    update_fcr31();                                            \
2886
    if (c)                                                     \
2887
        SET_FP_COND(cc, env->active_fpu);                      \
2888
    else                                                       \
2889
        CLEAR_FP_COND(cc, env->active_fpu);                    \
2890
}
2891

    
2892
/* NOTE: the comma operator will make "cond" to eval to false,
2893
 * but float64_unordered_quiet() is still called. */
2894
FOP_COND_D(f,   (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
2895
FOP_COND_D(un,  float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
2896
FOP_COND_D(eq,  !float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
2897
FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
2898
FOP_COND_D(olt, !float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
2899
FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
2900
FOP_COND_D(ole, !float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2901
FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2902
/* NOTE: the comma operator will make "cond" to eval to false,
2903
 * but float64_unordered() is still called. */
2904
FOP_COND_D(sf,  (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
2905
FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
2906
FOP_COND_D(seq, !float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
2907
FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
2908
FOP_COND_D(lt,  !float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
2909
FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
2910
FOP_COND_D(le,  !float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2911
FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
2912

    
2913
#define FOP_COND_S(op, cond)                                   \
2914
void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
2915
{                                                              \
2916
    int c = cond;                                              \
2917
    update_fcr31();                                            \
2918
    if (c)                                                     \
2919
        SET_FP_COND(cc, env->active_fpu);                      \
2920
    else                                                       \
2921
        CLEAR_FP_COND(cc, env->active_fpu);                    \
2922
}                                                              \
2923
void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
2924
{                                                              \
2925
    int c;                                                     \
2926
    fst0 = float32_abs(fst0);                                  \
2927
    fst1 = float32_abs(fst1);                                  \
2928
    c = cond;                                                  \
2929
    update_fcr31();                                            \
2930
    if (c)                                                     \
2931
        SET_FP_COND(cc, env->active_fpu);                      \
2932
    else                                                       \
2933
        CLEAR_FP_COND(cc, env->active_fpu);                    \
2934
}
2935

    
2936
/* NOTE: the comma operator will make "cond" to eval to false,
2937
 * but float32_unordered_quiet() is still called. */
2938
FOP_COND_S(f,   (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
2939
FOP_COND_S(un,  float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
2940
FOP_COND_S(eq,  !float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
2941
FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
2942
FOP_COND_S(olt, !float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
2943
FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
2944
FOP_COND_S(ole, !float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
2945
FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
2946
/* NOTE: the comma operator will make "cond" to eval to false,
2947
 * but float32_unordered() is still called. */
2948
FOP_COND_S(sf,  (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
2949
FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
2950
FOP_COND_S(seq, !float32_unordered(fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
2951
FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
2952
FOP_COND_S(lt,  !float32_unordered(fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
2953
FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
2954
FOP_COND_S(le,  !float32_unordered(fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
2955
FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
2956

    
2957
#define FOP_COND_PS(op, condl, condh)                           \
2958
void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
2959
{                                                               \
2960
    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
2961
    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
2962
    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
2963
    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
2964
    int cl = condl;                                             \
2965
    int ch = condh;                                             \
2966
                                                                \
2967
    update_fcr31();                                             \
2968
    if (cl)                                                     \
2969
        SET_FP_COND(cc, env->active_fpu);                       \
2970
    else                                                        \
2971
        CLEAR_FP_COND(cc, env->active_fpu);                     \
2972
    if (ch)                                                     \
2973
        SET_FP_COND(cc + 1, env->active_fpu);                   \
2974
    else                                                        \
2975
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
2976
}                                                               \
2977
void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
2978
{                                                               \
2979
    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
2980
    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
2981
    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
2982
    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
2983
    int cl = condl;                                             \
2984
    int ch = condh;                                             \
2985
                                                                \
2986
    update_fcr31();                                             \
2987
    if (cl)                                                     \
2988
        SET_FP_COND(cc, env->active_fpu);                       \
2989
    else                                                        \
2990
        CLEAR_FP_COND(cc, env->active_fpu);                     \
2991
    if (ch)                                                     \
2992
        SET_FP_COND(cc + 1, env->active_fpu);                   \
2993
    else                                                        \
2994
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
2995
}
2996

    
2997
/* NOTE: the comma operator will make "cond" to eval to false,
2998
 * but float32_unordered_quiet() is still called. */
2999
FOP_COND_PS(f,   (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
3000
                 (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3001
FOP_COND_PS(un,  float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
3002
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
3003
FOP_COND_PS(eq,  !float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3004
                 !float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3005
FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3006
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3007
FOP_COND_PS(olt, !float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3008
                 !float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3009
FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3010
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3011
FOP_COND_PS(ole, !float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
3012
                 !float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3013
FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3014
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3015
/* NOTE: the comma operator will make "cond" to eval to false,
3016
 * but float32_unordered() is still called. */
3017
FOP_COND_PS(sf,  (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
3018
                 (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3019
FOP_COND_PS(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
3020
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
3021
FOP_COND_PS(seq, !float32_unordered(fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3022
                 !float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3023
FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3024
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3025
FOP_COND_PS(lt,  !float32_unordered(fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3026
                 !float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3027
FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3028
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3029
FOP_COND_PS(le,  !float32_unordered(fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
3030
                 !float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3031
FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3032
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))