Statistics
| Branch: | Revision:

root / target-mips / op_helper.c @ 64e58fe5

History | View | Annotate | Download (89.5 kB)

1
/*
2
 *  MIPS emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2004-2005 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <stdlib.h>
20
#include "exec.h"
21

    
22
#include "host-utils.h"
23

    
24
#include "helper.h"
25
/*****************************************************************************/
26
/* Exceptions processing helpers */
27

    
28
void helper_raise_exception_err (uint32_t exception, int error_code)
29
{
30
#if 1
31
    if (exception < 0x100)
32
        qemu_log("%s: %d %d\n", __func__, exception, error_code);
33
#endif
34
    env->exception_index = exception;
35
    env->error_code = error_code;
36
    cpu_loop_exit();
37
}
38

    
39
void helper_raise_exception (uint32_t exception)
40
{
41
    helper_raise_exception_err(exception, 0);
42
}
43

    
44
void helper_interrupt_restart (void)
45
{
46
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
47
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
48
        !(env->hflags & MIPS_HFLAG_DM) &&
49
        (env->CP0_Status & (1 << CP0St_IE)) &&
50
        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
51
        env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
52
        helper_raise_exception(EXCP_EXT_INTERRUPT);
53
    }
54
}
55

    
56
#if !defined(CONFIG_USER_ONLY)
57
static void do_restore_state (void *pc_ptr)
58
{
59
    TranslationBlock *tb;
60
    unsigned long pc = (unsigned long) pc_ptr;
61
    
62
    tb = tb_find_pc (pc);
63
    if (tb) {
64
        cpu_restore_state (tb, env, pc, NULL);
65
    }
66
}
67
#endif
68

    
69
target_ulong helper_clo (target_ulong arg1)
70
{
71
    return clo32(arg1);
72
}
73

    
74
target_ulong helper_clz (target_ulong arg1)
75
{
76
    return clz32(arg1);
77
}
78

    
79
#if defined(TARGET_MIPS64)
80
target_ulong helper_dclo (target_ulong arg1)
81
{
82
    return clo64(arg1);
83
}
84

    
85
target_ulong helper_dclz (target_ulong arg1)
86
{
87
    return clz64(arg1);
88
}
89
#endif /* TARGET_MIPS64 */
90

    
91
/* 64 bits arithmetic for 32 bits hosts */
92
static inline uint64_t get_HILO (void)
93
{
94
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
95
}
96

    
97
static inline void set_HILO (uint64_t HILO)
98
{
99
    env->active_tc.LO[0] = (int32_t)HILO;
100
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
101
}
102

    
103
static inline void set_HIT0_LO (target_ulong arg1, uint64_t HILO)
104
{
105
    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
106
    arg1 = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
107
}
108

    
109
static inline void set_HI_LOT0 (target_ulong arg1, uint64_t HILO)
110
{
111
    arg1 = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
112
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
113
}
114

    
115
/* Multiplication variants of the vr54xx. */
116
target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
117
{
118
    set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
119

    
120
    return arg1;
121
}
122

    
123
target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
124
{
125
    set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
126

    
127
    return arg1;
128
}
129

    
130
target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
131
{
132
    set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
133

    
134
    return arg1;
135
}
136

    
137
target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
138
{
139
    set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
140

    
141
    return arg1;
142
}
143

    
144
target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
145
{
146
    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
147

    
148
    return arg1;
149
}
150

    
151
target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
152
{
153
    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
154

    
155
    return arg1;
156
}
157

    
158
target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
159
{
160
    set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
161

    
162
    return arg1;
163
}
164

    
165
target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
166
{
167
    set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
168

    
169
    return arg1;
170
}
171

    
172
target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
173
{
174
    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
175

    
176
    return arg1;
177
}
178

    
179
target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
180
{
181
    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
182

    
183
    return arg1;
184
}
185

    
186
target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
187
{
188
    set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
189

    
190
    return arg1;
191
}
192

    
193
target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
194
{
195
    set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
196

    
197
    return arg1;
198
}
199

    
200
target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
201
{
202
    set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
203

    
204
    return arg1;
205
}
206

    
207
target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
208
{
209
    set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
210

    
211
    return arg1;
212
}
213

    
214
#ifdef TARGET_MIPS64
215
void helper_dmult (target_ulong arg1, target_ulong arg2)
216
{
217
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
218
}
219

    
220
void helper_dmultu (target_ulong arg1, target_ulong arg2)
221
{
222
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
223
}
224
#endif
225

    
226
#ifdef TARGET_WORDS_BIGENDIAN
227
#define GET_LMASK(v) ((v) & 3)
228
#define GET_OFFSET(addr, offset) (addr + (offset))
229
#else
230
#define GET_LMASK(v) (((v) & 3) ^ 3)
231
#define GET_OFFSET(addr, offset) (addr - (offset))
232
#endif
233

    
234
target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
235
{
236
    target_ulong tmp;
237

    
238
#ifdef CONFIG_USER_ONLY
239
#define ldfun ldub_raw
240
#else
241
    int (*ldfun)(target_ulong);
242

    
243
    switch (mem_idx)
244
    {
245
    case 0: ldfun = ldub_kernel; break;
246
    case 1: ldfun = ldub_super; break;
247
    default:
248
    case 2: ldfun = ldub_user; break;
249
    }
250
#endif
251
    tmp = ldfun(arg2);
252
    arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
253

    
254
    if (GET_LMASK(arg2) <= 2) {
255
        tmp = ldfun(GET_OFFSET(arg2, 1));
256
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
257
    }
258

    
259
    if (GET_LMASK(arg2) <= 1) {
260
        tmp = ldfun(GET_OFFSET(arg2, 2));
261
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
262
    }
263

    
264
    if (GET_LMASK(arg2) == 0) {
265
        tmp = ldfun(GET_OFFSET(arg2, 3));
266
        arg1 = (arg1 & 0xFFFFFF00) | tmp;
267
    }
268
    return (int32_t)arg1;
269
}
270

    
271
target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
272
{
273
    target_ulong tmp;
274

    
275
#ifdef CONFIG_USER_ONLY
276
#define ldfun ldub_raw
277
#else
278
    int (*ldfun)(target_ulong);
279

    
280
    switch (mem_idx)
281
    {
282
    case 0: ldfun = ldub_kernel; break;
283
    case 1: ldfun = ldub_super; break;
284
    default:
285
    case 2: ldfun = ldub_user; break;
286
    }
287
#endif
288
    tmp = ldfun(arg2);
289
    arg1 = (arg1 & 0xFFFFFF00) | tmp;
290

    
291
    if (GET_LMASK(arg2) >= 1) {
292
        tmp = ldfun(GET_OFFSET(arg2, -1));
293
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
294
    }
295

    
296
    if (GET_LMASK(arg2) >= 2) {
297
        tmp = ldfun(GET_OFFSET(arg2, -2));
298
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
299
    }
300

    
301
    if (GET_LMASK(arg2) == 3) {
302
        tmp = ldfun(GET_OFFSET(arg2, -3));
303
        arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
304
    }
305
    return (int32_t)arg1;
306
}
307

    
308
void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
309
{
310
#ifdef CONFIG_USER_ONLY
311
#define stfun stb_raw
312
#else
313
    void (*stfun)(target_ulong, int);
314

    
315
    switch (mem_idx)
316
    {
317
    case 0: stfun = stb_kernel; break;
318
    case 1: stfun = stb_super; break;
319
    default:
320
    case 2: stfun = stb_user; break;
321
    }
322
#endif
323
    stfun(arg2, (uint8_t)(arg1 >> 24));
324

    
325
    if (GET_LMASK(arg2) <= 2)
326
        stfun(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16));
327

    
328
    if (GET_LMASK(arg2) <= 1)
329
        stfun(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8));
330

    
331
    if (GET_LMASK(arg2) == 0)
332
        stfun(GET_OFFSET(arg2, 3), (uint8_t)arg1);
333
}
334

    
335
void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
336
{
337
#ifdef CONFIG_USER_ONLY
338
#define stfun stb_raw
339
#else
340
    void (*stfun)(target_ulong, int);
341

    
342
    switch (mem_idx)
343
    {
344
    case 0: stfun = stb_kernel; break;
345
    case 1: stfun = stb_super; break;
346
    default:
347
    case 2: stfun = stb_user; break;
348
    }
349
#endif
350
    stfun(arg2, (uint8_t)arg1);
351

    
352
    if (GET_LMASK(arg2) >= 1)
353
        stfun(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8));
354

    
355
    if (GET_LMASK(arg2) >= 2)
356
        stfun(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16));
357

    
358
    if (GET_LMASK(arg2) == 3)
359
        stfun(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24));
360
}
361

    
362
#if defined(TARGET_MIPS64)
363
/* "half" load and stores.  We must do the memory access inline,
364
   or fault handling won't work.  */
365

    
366
#ifdef TARGET_WORDS_BIGENDIAN
367
#define GET_LMASK64(v) ((v) & 7)
368
#else
369
#define GET_LMASK64(v) (((v) & 7) ^ 7)
370
#endif
371

    
372
target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
373
{
374
    uint64_t tmp;
375

    
376
#ifdef CONFIG_USER_ONLY
377
#define ldfun ldub_raw
378
#else
379
    int (*ldfun)(target_ulong);
380

    
381
    switch (mem_idx)
382
    {
383
    case 0: ldfun = ldub_kernel; break;
384
    case 1: ldfun = ldub_super; break;
385
    default:
386
    case 2: ldfun = ldub_user; break;
387
    }
388
#endif
389
    tmp = ldfun(arg2);
390
    arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
391

    
392
    if (GET_LMASK64(arg2) <= 6) {
393
        tmp = ldfun(GET_OFFSET(arg2, 1));
394
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
395
    }
396

    
397
    if (GET_LMASK64(arg2) <= 5) {
398
        tmp = ldfun(GET_OFFSET(arg2, 2));
399
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
400
    }
401

    
402
    if (GET_LMASK64(arg2) <= 4) {
403
        tmp = ldfun(GET_OFFSET(arg2, 3));
404
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
405
    }
406

    
407
    if (GET_LMASK64(arg2) <= 3) {
408
        tmp = ldfun(GET_OFFSET(arg2, 4));
409
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
410
    }
411

    
412
    if (GET_LMASK64(arg2) <= 2) {
413
        tmp = ldfun(GET_OFFSET(arg2, 5));
414
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
415
    }
416

    
417
    if (GET_LMASK64(arg2) <= 1) {
418
        tmp = ldfun(GET_OFFSET(arg2, 6));
419
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
420
    }
421

    
422
    if (GET_LMASK64(arg2) == 0) {
423
        tmp = ldfun(GET_OFFSET(arg2, 7));
424
        arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
425
    }
426

    
427
    return arg1;
428
}
429

    
430
target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
431
{
432
    uint64_t tmp;
433

    
434
#ifdef CONFIG_USER_ONLY
435
#define ldfun ldub_raw
436
#else
437
    int (*ldfun)(target_ulong);
438

    
439
    switch (mem_idx)
440
    {
441
    case 0: ldfun = ldub_kernel; break;
442
    case 1: ldfun = ldub_super; break;
443
    default:
444
    case 2: ldfun = ldub_user; break;
445
    }
446
#endif
447
    tmp = ldfun(arg2);
448
    arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
449

    
450
    if (GET_LMASK64(arg2) >= 1) {
451
        tmp = ldfun(GET_OFFSET(arg2, -1));
452
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp  << 8);
453
    }
454

    
455
    if (GET_LMASK64(arg2) >= 2) {
456
        tmp = ldfun(GET_OFFSET(arg2, -2));
457
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
458
    }
459

    
460
    if (GET_LMASK64(arg2) >= 3) {
461
        tmp = ldfun(GET_OFFSET(arg2, -3));
462
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
463
    }
464

    
465
    if (GET_LMASK64(arg2) >= 4) {
466
        tmp = ldfun(GET_OFFSET(arg2, -4));
467
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
468
    }
469

    
470
    if (GET_LMASK64(arg2) >= 5) {
471
        tmp = ldfun(GET_OFFSET(arg2, -5));
472
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
473
    }
474

    
475
    if (GET_LMASK64(arg2) >= 6) {
476
        tmp = ldfun(GET_OFFSET(arg2, -6));
477
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
478
    }
479

    
480
    if (GET_LMASK64(arg2) == 7) {
481
        tmp = ldfun(GET_OFFSET(arg2, -7));
482
        arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
483
    }
484

    
485
    return arg1;
486
}
487

    
488
void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
489
{
490
#ifdef CONFIG_USER_ONLY
491
#define stfun stb_raw
492
#else
493
    void (*stfun)(target_ulong, int);
494

    
495
    switch (mem_idx)
496
    {
497
    case 0: stfun = stb_kernel; break;
498
    case 1: stfun = stb_super; break;
499
    default:
500
    case 2: stfun = stb_user; break;
501
    }
502
#endif
503
    stfun(arg2, (uint8_t)(arg1 >> 56));
504

    
505
    if (GET_LMASK64(arg2) <= 6)
506
        stfun(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48));
507

    
508
    if (GET_LMASK64(arg2) <= 5)
509
        stfun(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40));
510

    
511
    if (GET_LMASK64(arg2) <= 4)
512
        stfun(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32));
513

    
514
    if (GET_LMASK64(arg2) <= 3)
515
        stfun(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24));
516

    
517
    if (GET_LMASK64(arg2) <= 2)
518
        stfun(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16));
519

    
520
    if (GET_LMASK64(arg2) <= 1)
521
        stfun(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8));
522

    
523
    if (GET_LMASK64(arg2) <= 0)
524
        stfun(GET_OFFSET(arg2, 7), (uint8_t)arg1);
525
}
526

    
527
void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
528
{
529
#ifdef CONFIG_USER_ONLY
530
#define stfun stb_raw
531
#else
532
    void (*stfun)(target_ulong, int);
533

    
534
    switch (mem_idx)
535
    {
536
    case 0: stfun = stb_kernel; break;
537
    case 1: stfun = stb_super; break;
538
     default:
539
    case 2: stfun = stb_user; break;
540
    }
541
#endif
542
    stfun(arg2, (uint8_t)arg1);
543

    
544
    if (GET_LMASK64(arg2) >= 1)
545
        stfun(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8));
546

    
547
    if (GET_LMASK64(arg2) >= 2)
548
        stfun(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16));
549

    
550
    if (GET_LMASK64(arg2) >= 3)
551
        stfun(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24));
552

    
553
    if (GET_LMASK64(arg2) >= 4)
554
        stfun(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32));
555

    
556
    if (GET_LMASK64(arg2) >= 5)
557
        stfun(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40));
558

    
559
    if (GET_LMASK64(arg2) >= 6)
560
        stfun(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48));
561

    
562
    if (GET_LMASK64(arg2) == 7)
563
        stfun(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56));
564
}
565
#endif /* TARGET_MIPS64 */
566

    
567
#ifndef CONFIG_USER_ONLY
568
/* CP0 helpers */
569
target_ulong helper_mfc0_mvpcontrol (void)
570
{
571
    return env->mvp->CP0_MVPControl;
572
}
573

    
574
target_ulong helper_mfc0_mvpconf0 (void)
575
{
576
    return env->mvp->CP0_MVPConf0;
577
}
578

    
579
target_ulong helper_mfc0_mvpconf1 (void)
580
{
581
    return env->mvp->CP0_MVPConf1;
582
}
583

    
584
target_ulong helper_mfc0_random (void)
585
{
586
    return (int32_t)cpu_mips_get_random(env);
587
}
588

    
589
target_ulong helper_mfc0_tcstatus (void)
590
{
591
    return env->active_tc.CP0_TCStatus;
592
}
593

    
594
target_ulong helper_mftc0_tcstatus(void)
595
{
596
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
597

    
598
    if (other_tc == env->current_tc)
599
        return env->active_tc.CP0_TCStatus;
600
    else
601
        return env->tcs[other_tc].CP0_TCStatus;
602
}
603

    
604
target_ulong helper_mfc0_tcbind (void)
605
{
606
    return env->active_tc.CP0_TCBind;
607
}
608

    
609
target_ulong helper_mftc0_tcbind(void)
610
{
611
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
612

    
613
    if (other_tc == env->current_tc)
614
        return env->active_tc.CP0_TCBind;
615
    else
616
        return env->tcs[other_tc].CP0_TCBind;
617
}
618

    
619
target_ulong helper_mfc0_tcrestart (void)
620
{
621
    return env->active_tc.PC;
622
}
623

    
624
target_ulong helper_mftc0_tcrestart(void)
625
{
626
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
627

    
628
    if (other_tc == env->current_tc)
629
        return env->active_tc.PC;
630
    else
631
        return env->tcs[other_tc].PC;
632
}
633

    
634
target_ulong helper_mfc0_tchalt (void)
635
{
636
    return env->active_tc.CP0_TCHalt;
637
}
638

    
639
target_ulong helper_mftc0_tchalt(void)
640
{
641
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
642

    
643
    if (other_tc == env->current_tc)
644
        return env->active_tc.CP0_TCHalt;
645
    else
646
        return env->tcs[other_tc].CP0_TCHalt;
647
}
648

    
649
target_ulong helper_mfc0_tccontext (void)
650
{
651
    return env->active_tc.CP0_TCContext;
652
}
653

    
654
target_ulong helper_mftc0_tccontext(void)
655
{
656
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
657

    
658
    if (other_tc == env->current_tc)
659
        return env->active_tc.CP0_TCContext;
660
    else
661
        return env->tcs[other_tc].CP0_TCContext;
662
}
663

    
664
target_ulong helper_mfc0_tcschedule (void)
665
{
666
    return env->active_tc.CP0_TCSchedule;
667
}
668

    
669
target_ulong helper_mftc0_tcschedule(void)
670
{
671
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
672

    
673
    if (other_tc == env->current_tc)
674
        return env->active_tc.CP0_TCSchedule;
675
    else
676
        return env->tcs[other_tc].CP0_TCSchedule;
677
}
678

    
679
target_ulong helper_mfc0_tcschefback (void)
680
{
681
    return env->active_tc.CP0_TCScheFBack;
682
}
683

    
684
target_ulong helper_mftc0_tcschefback(void)
685
{
686
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
687

    
688
    if (other_tc == env->current_tc)
689
        return env->active_tc.CP0_TCScheFBack;
690
    else
691
        return env->tcs[other_tc].CP0_TCScheFBack;
692
}
693

    
694
target_ulong helper_mfc0_count (void)
695
{
696
    return (int32_t)cpu_mips_get_count(env);
697
}
698

    
699
target_ulong helper_mftc0_entryhi(void)
700
{
701
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
702
    int32_t tcstatus;
703

    
704
    if (other_tc == env->current_tc)
705
        tcstatus = env->active_tc.CP0_TCStatus;
706
    else
707
        tcstatus = env->tcs[other_tc].CP0_TCStatus;
708

    
709
    return (env->CP0_EntryHi & ~0xff) | (tcstatus & 0xff);
710
}
711

    
712
target_ulong helper_mftc0_status(void)
713
{
714
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
715
    target_ulong t0;
716
    int32_t tcstatus;
717

    
718
    if (other_tc == env->current_tc)
719
        tcstatus = env->active_tc.CP0_TCStatus;
720
    else
721
        tcstatus = env->tcs[other_tc].CP0_TCStatus;
722

    
723
    t0 = env->CP0_Status & ~0xf1000018;
724
    t0 |= tcstatus & (0xf << CP0TCSt_TCU0);
725
    t0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
726
    t0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);
727

    
728
    return t0;
729
}
730

    
731
target_ulong helper_mfc0_lladdr (void)
732
{
733
    return (int32_t)env->CP0_LLAddr >> 4;
734
}
735

    
736
target_ulong helper_mfc0_watchlo (uint32_t sel)
737
{
738
    return (int32_t)env->CP0_WatchLo[sel];
739
}
740

    
741
target_ulong helper_mfc0_watchhi (uint32_t sel)
742
{
743
    return env->CP0_WatchHi[sel];
744
}
745

    
746
target_ulong helper_mfc0_debug (void)
747
{
748
    target_ulong t0 = env->CP0_Debug;
749
    if (env->hflags & MIPS_HFLAG_DM)
750
        t0 |= 1 << CP0DB_DM;
751

    
752
    return t0;
753
}
754

    
755
target_ulong helper_mftc0_debug(void)
756
{
757
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
758
    int32_t tcstatus;
759

    
760
    if (other_tc == env->current_tc)
761
        tcstatus = env->active_tc.CP0_Debug_tcstatus;
762
    else
763
        tcstatus = env->tcs[other_tc].CP0_Debug_tcstatus;
764

    
765
    /* XXX: Might be wrong, check with EJTAG spec. */
766
    return (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
767
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
768
}
769

    
770
#if defined(TARGET_MIPS64)
771
target_ulong helper_dmfc0_tcrestart (void)
772
{
773
    return env->active_tc.PC;
774
}
775

    
776
target_ulong helper_dmfc0_tchalt (void)
777
{
778
    return env->active_tc.CP0_TCHalt;
779
}
780

    
781
target_ulong helper_dmfc0_tccontext (void)
782
{
783
    return env->active_tc.CP0_TCContext;
784
}
785

    
786
target_ulong helper_dmfc0_tcschedule (void)
787
{
788
    return env->active_tc.CP0_TCSchedule;
789
}
790

    
791
target_ulong helper_dmfc0_tcschefback (void)
792
{
793
    return env->active_tc.CP0_TCScheFBack;
794
}
795

    
796
target_ulong helper_dmfc0_lladdr (void)
797
{
798
    return env->CP0_LLAddr >> 4;
799
}
800

    
801
target_ulong helper_dmfc0_watchlo (uint32_t sel)
802
{
803
    return env->CP0_WatchLo[sel];
804
}
805
#endif /* TARGET_MIPS64 */
806

    
807
void helper_mtc0_index (target_ulong arg1)
808
{
809
    int num = 1;
810
    unsigned int tmp = env->tlb->nb_tlb;
811

    
812
    do {
813
        tmp >>= 1;
814
        num <<= 1;
815
    } while (tmp);
816
    env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
817
}
818

    
819
void helper_mtc0_mvpcontrol (target_ulong arg1)
820
{
821
    uint32_t mask = 0;
822
    uint32_t newval;
823

    
824
    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
825
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
826
                (1 << CP0MVPCo_EVP);
827
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
828
        mask |= (1 << CP0MVPCo_STLB);
829
    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
830

    
831
    // TODO: Enable/disable shared TLB, enable/disable VPEs.
832

    
833
    env->mvp->CP0_MVPControl = newval;
834
}
835

    
836
void helper_mtc0_vpecontrol (target_ulong arg1)
837
{
838
    uint32_t mask;
839
    uint32_t newval;
840

    
841
    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
842
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
843
    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
844

    
845
    /* Yield scheduler intercept not implemented. */
846
    /* Gating storage scheduler intercept not implemented. */
847

    
848
    // TODO: Enable/disable TCs.
849

    
850
    env->CP0_VPEControl = newval;
851
}
852

    
853
void helper_mtc0_vpeconf0 (target_ulong arg1)
854
{
855
    uint32_t mask = 0;
856
    uint32_t newval;
857

    
858
    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
859
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
860
            mask |= (0xff << CP0VPEC0_XTC);
861
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
862
    }
863
    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
864

    
865
    // TODO: TC exclusive handling due to ERL/EXL.
866

    
867
    env->CP0_VPEConf0 = newval;
868
}
869

    
870
void helper_mtc0_vpeconf1 (target_ulong arg1)
871
{
872
    uint32_t mask = 0;
873
    uint32_t newval;
874

    
875
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
876
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
877
                (0xff << CP0VPEC1_NCP1);
878
    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
879

    
880
    /* UDI not implemented. */
881
    /* CP2 not implemented. */
882

    
883
    // TODO: Handle FPU (CP1) binding.
884

    
885
    env->CP0_VPEConf1 = newval;
886
}
887

    
888
void helper_mtc0_yqmask (target_ulong arg1)
889
{
890
    /* Yield qualifier inputs not implemented. */
891
    env->CP0_YQMask = 0x00000000;
892
}
893

    
894
void helper_mtc0_vpeopt (target_ulong arg1)
895
{
896
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
897
}
898

    
899
void helper_mtc0_entrylo0 (target_ulong arg1)
900
{
901
    /* Large physaddr (PABITS) not implemented */
902
    /* 1k pages not implemented */
903
    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
904
}
905

    
906
void helper_mtc0_tcstatus (target_ulong arg1)
907
{
908
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
909
    uint32_t newval;
910

    
911
    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
912

    
913
    // TODO: Sync with CP0_Status.
914

    
915
    env->active_tc.CP0_TCStatus = newval;
916
}
917

    
918
void helper_mttc0_tcstatus (target_ulong arg1)
919
{
920
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
921

    
922
    // TODO: Sync with CP0_Status.
923

    
924
    if (other_tc == env->current_tc)
925
        env->active_tc.CP0_TCStatus = arg1;
926
    else
927
        env->tcs[other_tc].CP0_TCStatus = arg1;
928
}
929

    
930
void helper_mtc0_tcbind (target_ulong arg1)
931
{
932
    uint32_t mask = (1 << CP0TCBd_TBE);
933
    uint32_t newval;
934

    
935
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
936
        mask |= (1 << CP0TCBd_CurVPE);
937
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
938
    env->active_tc.CP0_TCBind = newval;
939
}
940

    
941
void helper_mttc0_tcbind (target_ulong arg1)
942
{
943
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
944
    uint32_t mask = (1 << CP0TCBd_TBE);
945
    uint32_t newval;
946

    
947
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
948
        mask |= (1 << CP0TCBd_CurVPE);
949
    if (other_tc == env->current_tc) {
950
        newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
951
        env->active_tc.CP0_TCBind = newval;
952
    } else {
953
        newval = (env->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
954
        env->tcs[other_tc].CP0_TCBind = newval;
955
    }
956
}
957

    
958
void helper_mtc0_tcrestart (target_ulong arg1)
959
{
960
    env->active_tc.PC = arg1;
961
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
962
    env->CP0_LLAddr = 0ULL;
963
    /* MIPS16 not implemented. */
964
}
965

    
966
void helper_mttc0_tcrestart (target_ulong arg1)
967
{
968
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
969

    
970
    if (other_tc == env->current_tc) {
971
        env->active_tc.PC = arg1;
972
        env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
973
        env->CP0_LLAddr = 0ULL;
974
        /* MIPS16 not implemented. */
975
    } else {
976
        env->tcs[other_tc].PC = arg1;
977
        env->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
978
        env->CP0_LLAddr = 0ULL;
979
        /* MIPS16 not implemented. */
980
    }
981
}
982

    
983
void helper_mtc0_tchalt (target_ulong arg1)
984
{
985
    env->active_tc.CP0_TCHalt = arg1 & 0x1;
986

    
987
    // TODO: Halt TC / Restart (if allocated+active) TC.
988
}
989

    
990
void helper_mttc0_tchalt (target_ulong arg1)
991
{
992
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
993

    
994
    // TODO: Halt TC / Restart (if allocated+active) TC.
995

    
996
    if (other_tc == env->current_tc)
997
        env->active_tc.CP0_TCHalt = arg1;
998
    else
999
        env->tcs[other_tc].CP0_TCHalt = arg1;
1000
}
1001

    
1002
void helper_mtc0_tccontext (target_ulong arg1)
1003
{
1004
    env->active_tc.CP0_TCContext = arg1;
1005
}
1006

    
1007
void helper_mttc0_tccontext (target_ulong arg1)
1008
{
1009
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1010

    
1011
    if (other_tc == env->current_tc)
1012
        env->active_tc.CP0_TCContext = arg1;
1013
    else
1014
        env->tcs[other_tc].CP0_TCContext = arg1;
1015
}
1016

    
1017
void helper_mtc0_tcschedule (target_ulong arg1)
1018
{
1019
    env->active_tc.CP0_TCSchedule = arg1;
1020
}
1021

    
1022
void helper_mttc0_tcschedule (target_ulong arg1)
1023
{
1024
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1025

    
1026
    if (other_tc == env->current_tc)
1027
        env->active_tc.CP0_TCSchedule = arg1;
1028
    else
1029
        env->tcs[other_tc].CP0_TCSchedule = arg1;
1030
}
1031

    
1032
void helper_mtc0_tcschefback (target_ulong arg1)
1033
{
1034
    env->active_tc.CP0_TCScheFBack = arg1;
1035
}
1036

    
1037
void helper_mttc0_tcschefback (target_ulong arg1)
1038
{
1039
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1040

    
1041
    if (other_tc == env->current_tc)
1042
        env->active_tc.CP0_TCScheFBack = arg1;
1043
    else
1044
        env->tcs[other_tc].CP0_TCScheFBack = arg1;
1045
}
1046

    
1047
void helper_mtc0_entrylo1 (target_ulong arg1)
1048
{
1049
    /* Large physaddr (PABITS) not implemented */
1050
    /* 1k pages not implemented */
1051
    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
1052
}
1053

    
1054
void helper_mtc0_context (target_ulong arg1)
1055
{
1056
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
1057
}
1058

    
1059
void helper_mtc0_pagemask (target_ulong arg1)
1060
{
1061
    /* 1k pages not implemented */
1062
    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
1063
}
1064

    
1065
void helper_mtc0_pagegrain (target_ulong arg1)
1066
{
1067
    /* SmartMIPS not implemented */
1068
    /* Large physaddr (PABITS) not implemented */
1069
    /* 1k pages not implemented */
1070
    env->CP0_PageGrain = 0;
1071
}
1072

    
1073
void helper_mtc0_wired (target_ulong arg1)
1074
{
1075
    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
1076
}
1077

    
1078
void helper_mtc0_srsconf0 (target_ulong arg1)
1079
{
1080
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
1081
}
1082

    
1083
void helper_mtc0_srsconf1 (target_ulong arg1)
1084
{
1085
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
1086
}
1087

    
1088
void helper_mtc0_srsconf2 (target_ulong arg1)
1089
{
1090
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
1091
}
1092

    
1093
void helper_mtc0_srsconf3 (target_ulong arg1)
1094
{
1095
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
1096
}
1097

    
1098
void helper_mtc0_srsconf4 (target_ulong arg1)
1099
{
1100
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
1101
}
1102

    
1103
void helper_mtc0_hwrena (target_ulong arg1)
1104
{
1105
    env->CP0_HWREna = arg1 & 0x0000000F;
1106
}
1107

    
1108
void helper_mtc0_count (target_ulong arg1)
1109
{
1110
    cpu_mips_store_count(env, arg1);
1111
}
1112

    
1113
void helper_mtc0_entryhi (target_ulong arg1)
1114
{
1115
    target_ulong old, val;
1116

    
1117
    /* 1k pages not implemented */
1118
    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
1119
#if defined(TARGET_MIPS64)
1120
    val &= env->SEGMask;
1121
#endif
1122
    old = env->CP0_EntryHi;
1123
    env->CP0_EntryHi = val;
1124
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1125
        uint32_t tcst = env->active_tc.CP0_TCStatus & ~0xff;
1126
        env->active_tc.CP0_TCStatus = tcst | (val & 0xff);
1127
    }
1128
    /* If the ASID changes, flush qemu's TLB.  */
1129
    if ((old & 0xFF) != (val & 0xFF))
1130
        cpu_mips_tlb_flush(env, 1);
1131
}
1132

    
1133
void helper_mttc0_entryhi(target_ulong arg1)
1134
{
1135
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1136
    int32_t tcstatus;
1137

    
1138
    env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (arg1 & ~0xff);
1139
    if (other_tc == env->current_tc) {
1140
        tcstatus = (env->active_tc.CP0_TCStatus & ~0xff) | (arg1 & 0xff);
1141
        env->active_tc.CP0_TCStatus = tcstatus;
1142
    } else {
1143
        tcstatus = (env->tcs[other_tc].CP0_TCStatus & ~0xff) | (arg1 & 0xff);
1144
        env->tcs[other_tc].CP0_TCStatus = tcstatus;
1145
    }
1146
}
1147

    
1148
void helper_mtc0_compare (target_ulong arg1)
1149
{
1150
    cpu_mips_store_compare(env, arg1);
1151
}
1152

    
1153
void helper_mtc0_status (target_ulong arg1)
1154
{
1155
    uint32_t val, old;
1156
    uint32_t mask = env->CP0_Status_rw_bitmask;
1157

    
1158
    val = arg1 & mask;
1159
    old = env->CP0_Status;
1160
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
1161
    compute_hflags(env);
1162
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1163
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
1164
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
1165
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
1166
                env->CP0_Cause);
1167
        switch (env->hflags & MIPS_HFLAG_KSU) {
1168
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1169
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1170
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
1171
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1172
        }
1173
    }
1174
    cpu_mips_update_irq(env);
1175
}
1176

    
1177
void helper_mttc0_status(target_ulong arg1)
1178
{
1179
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1180
    int32_t tcstatus = env->tcs[other_tc].CP0_TCStatus;
1181

    
1182
    env->CP0_Status = arg1 & ~0xf1000018;
1183
    tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (arg1 & (0xf << CP0St_CU0));
1184
    tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((arg1 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
1185
    tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((arg1 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
1186
    if (other_tc == env->current_tc)
1187
        env->active_tc.CP0_TCStatus = tcstatus;
1188
    else
1189
        env->tcs[other_tc].CP0_TCStatus = tcstatus;
1190
}
1191

    
1192
void helper_mtc0_intctl (target_ulong arg1)
1193
{
1194
    /* vectored interrupts not implemented, no performance counters. */
1195
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (arg1 & 0x000002e0);
1196
}
1197

    
1198
void helper_mtc0_srsctl (target_ulong arg1)
1199
{
1200
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1201
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
1202
}
1203

    
1204
void helper_mtc0_cause (target_ulong arg1)
1205
{
1206
    uint32_t mask = 0x00C00300;
1207
    uint32_t old = env->CP0_Cause;
1208

    
1209
    if (env->insn_flags & ISA_MIPS32R2)
1210
        mask |= 1 << CP0Ca_DC;
1211

    
1212
    env->CP0_Cause = (env->CP0_Cause & ~mask) | (arg1 & mask);
1213

    
1214
    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
1215
        if (env->CP0_Cause & (1 << CP0Ca_DC))
1216
            cpu_mips_stop_count(env);
1217
        else
1218
            cpu_mips_start_count(env);
1219
    }
1220

    
1221
    /* Handle the software interrupt as an hardware one, as they
1222
       are very similar */
1223
    if (arg1 & CP0Ca_IP_mask) {
1224
        cpu_mips_update_irq(env);
1225
    }
1226
}
1227

    
1228
void helper_mtc0_ebase (target_ulong arg1)
1229
{
1230
    /* vectored interrupts not implemented */
1231
    /* Multi-CPU not implemented */
1232
    env->CP0_EBase = 0x80000000 | (arg1 & 0x3FFFF000);
1233
}
1234

    
1235
void helper_mtc0_config0 (target_ulong arg1)
1236
{
1237
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1238
}
1239

    
1240
void helper_mtc0_config2 (target_ulong arg1)
1241
{
1242
    /* tertiary/secondary caches not implemented */
1243
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1244
}
1245

    
1246
void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
1247
{
1248
    /* Watch exceptions for instructions, data loads, data stores
1249
       not implemented. */
1250
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1251
}
1252

    
1253
void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
1254
{
1255
    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
1256
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
1257
}
1258

    
1259
void helper_mtc0_xcontext (target_ulong arg1)
1260
{
1261
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1262
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1263
}
1264

    
1265
void helper_mtc0_framemask (target_ulong arg1)
1266
{
1267
    env->CP0_Framemask = arg1; /* XXX */
1268
}
1269

    
1270
void helper_mtc0_debug (target_ulong arg1)
1271
{
1272
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1273
    if (arg1 & (1 << CP0DB_DM))
1274
        env->hflags |= MIPS_HFLAG_DM;
1275
    else
1276
        env->hflags &= ~MIPS_HFLAG_DM;
1277
}
1278

    
1279
void helper_mttc0_debug(target_ulong arg1)
1280
{
1281
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1282
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1283

    
1284
    /* XXX: Might be wrong, check with EJTAG spec. */
1285
    if (other_tc == env->current_tc)
1286
        env->active_tc.CP0_Debug_tcstatus = val;
1287
    else
1288
        env->tcs[other_tc].CP0_Debug_tcstatus = val;
1289
    env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1290
                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1291
}
1292

    
1293
void helper_mtc0_performance0 (target_ulong arg1)
1294
{
1295
    env->CP0_Performance0 = arg1 & 0x000007ff;
1296
}
1297

    
1298
void helper_mtc0_taglo (target_ulong arg1)
1299
{
1300
    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1301
}
1302

    
1303
void helper_mtc0_datalo (target_ulong arg1)
1304
{
1305
    env->CP0_DataLo = arg1; /* XXX */
1306
}
1307

    
1308
void helper_mtc0_taghi (target_ulong arg1)
1309
{
1310
    env->CP0_TagHi = arg1; /* XXX */
1311
}
1312

    
1313
void helper_mtc0_datahi (target_ulong arg1)
1314
{
1315
    env->CP0_DataHi = arg1; /* XXX */
1316
}
1317

    
1318
/* MIPS MT functions */
1319
target_ulong helper_mftgpr(uint32_t sel)
1320
{
1321
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1322

    
1323
    if (other_tc == env->current_tc)
1324
        return env->active_tc.gpr[sel];
1325
    else
1326
        return env->tcs[other_tc].gpr[sel];
1327
}
1328

    
1329
target_ulong helper_mftlo(uint32_t sel)
1330
{
1331
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1332

    
1333
    if (other_tc == env->current_tc)
1334
        return env->active_tc.LO[sel];
1335
    else
1336
        return env->tcs[other_tc].LO[sel];
1337
}
1338

    
1339
target_ulong helper_mfthi(uint32_t sel)
1340
{
1341
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1342

    
1343
    if (other_tc == env->current_tc)
1344
        return env->active_tc.HI[sel];
1345
    else
1346
        return env->tcs[other_tc].HI[sel];
1347
}
1348

    
1349
target_ulong helper_mftacx(uint32_t sel)
1350
{
1351
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1352

    
1353
    if (other_tc == env->current_tc)
1354
        return env->active_tc.ACX[sel];
1355
    else
1356
        return env->tcs[other_tc].ACX[sel];
1357
}
1358

    
1359
target_ulong helper_mftdsp(void)
1360
{
1361
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1362

    
1363
    if (other_tc == env->current_tc)
1364
        return env->active_tc.DSPControl;
1365
    else
1366
        return env->tcs[other_tc].DSPControl;
1367
}
1368

    
1369
void helper_mttgpr(target_ulong arg1, uint32_t sel)
1370
{
1371
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1372

    
1373
    if (other_tc == env->current_tc)
1374
        env->active_tc.gpr[sel] = arg1;
1375
    else
1376
        env->tcs[other_tc].gpr[sel] = arg1;
1377
}
1378

    
1379
void helper_mttlo(target_ulong arg1, uint32_t sel)
1380
{
1381
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1382

    
1383
    if (other_tc == env->current_tc)
1384
        env->active_tc.LO[sel] = arg1;
1385
    else
1386
        env->tcs[other_tc].LO[sel] = arg1;
1387
}
1388

    
1389
void helper_mtthi(target_ulong arg1, uint32_t sel)
1390
{
1391
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1392

    
1393
    if (other_tc == env->current_tc)
1394
        env->active_tc.HI[sel] = arg1;
1395
    else
1396
        env->tcs[other_tc].HI[sel] = arg1;
1397
}
1398

    
1399
void helper_mttacx(target_ulong arg1, uint32_t sel)
1400
{
1401
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1402

    
1403
    if (other_tc == env->current_tc)
1404
        env->active_tc.ACX[sel] = arg1;
1405
    else
1406
        env->tcs[other_tc].ACX[sel] = arg1;
1407
}
1408

    
1409
void helper_mttdsp(target_ulong arg1)
1410
{
1411
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1412

    
1413
    if (other_tc == env->current_tc)
1414
        env->active_tc.DSPControl = arg1;
1415
    else
1416
        env->tcs[other_tc].DSPControl = arg1;
1417
}
1418

    
1419
/* MIPS MT functions */
1420
target_ulong helper_dmt(target_ulong arg1)
1421
{
1422
    // TODO
1423
    arg1 = 0;
1424
    // rt = arg1
1425

    
1426
    return arg1;
1427
}
1428

    
1429
target_ulong helper_emt(target_ulong arg1)
1430
{
1431
    // TODO
1432
    arg1 = 0;
1433
    // rt = arg1
1434

    
1435
    return arg1;
1436
}
1437

    
1438
target_ulong helper_dvpe(target_ulong arg1)
1439
{
1440
    // TODO
1441
    arg1 = 0;
1442
    // rt = arg1
1443

    
1444
    return arg1;
1445
}
1446

    
1447
target_ulong helper_evpe(target_ulong arg1)
1448
{
1449
    // TODO
1450
    arg1 = 0;
1451
    // rt = arg1
1452

    
1453
    return arg1;
1454
}
1455
#endif /* !CONFIG_USER_ONLY */
1456

    
1457
void helper_fork(target_ulong arg1, target_ulong arg2)
1458
{
1459
    // arg1 = rt, arg2 = rs
1460
    arg1 = 0;
1461
    // TODO: store to TC register
1462
}
1463

    
1464
target_ulong helper_yield(target_ulong arg1)
1465
{
1466
    if (arg1 < 0) {
1467
        /* No scheduling policy implemented. */
1468
        if (arg1 != -2) {
1469
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
1470
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
1471
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1472
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
1473
                helper_raise_exception(EXCP_THREAD);
1474
            }
1475
        }
1476
    } else if (arg1 == 0) {
1477
        if (0 /* TODO: TC underflow */) {
1478
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1479
            helper_raise_exception(EXCP_THREAD);
1480
        } else {
1481
            // TODO: Deallocate TC
1482
        }
1483
    } else if (arg1 > 0) {
1484
        /* Yield qualifier inputs not implemented. */
1485
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1486
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
1487
        helper_raise_exception(EXCP_THREAD);
1488
    }
1489
    return env->CP0_YQMask;
1490
}
1491

    
1492
#ifndef CONFIG_USER_ONLY
1493
/* TLB management */
1494
void cpu_mips_tlb_flush (CPUState *env, int flush_global)
1495
{
1496
    /* Flush qemu's TLB and discard all shadowed entries.  */
1497
    tlb_flush (env, flush_global);
1498
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
1499
}
1500

    
1501
static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
1502
{
1503
    /* Discard entries from env->tlb[first] onwards.  */
1504
    while (env->tlb->tlb_in_use > first) {
1505
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
1506
    }
1507
}
1508

    
1509
static void r4k_fill_tlb (int idx)
1510
{
1511
    r4k_tlb_t *tlb;
1512

    
1513
    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1514
    tlb = &env->tlb->mmu.r4k.tlb[idx];
1515
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
1516
#if defined(TARGET_MIPS64)
1517
    tlb->VPN &= env->SEGMask;
1518
#endif
1519
    tlb->ASID = env->CP0_EntryHi & 0xFF;
1520
    tlb->PageMask = env->CP0_PageMask;
1521
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
1522
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1523
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1524
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1525
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1526
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1527
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1528
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1529
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1530
}
1531

    
1532
void r4k_helper_tlbwi (void)
1533
{
1534
    int idx;
1535

    
1536
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
1537

    
1538
    /* Discard cached TLB entries.  We could avoid doing this if the
1539
       tlbwi is just upgrading access permissions on the current entry;
1540
       that might be a further win.  */
1541
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
1542

    
1543
    r4k_invalidate_tlb(env, idx, 0);
1544
    r4k_fill_tlb(idx);
1545
}
1546

    
1547
void r4k_helper_tlbwr (void)
1548
{
1549
    int r = cpu_mips_get_random(env);
1550

    
1551
    r4k_invalidate_tlb(env, r, 1);
1552
    r4k_fill_tlb(r);
1553
}
1554

    
1555
void r4k_helper_tlbp (void)
1556
{
1557
    r4k_tlb_t *tlb;
1558
    target_ulong mask;
1559
    target_ulong tag;
1560
    target_ulong VPN;
1561
    uint8_t ASID;
1562
    int i;
1563

    
1564
    ASID = env->CP0_EntryHi & 0xFF;
1565
    for (i = 0; i < env->tlb->nb_tlb; i++) {
1566
        tlb = &env->tlb->mmu.r4k.tlb[i];
1567
        /* 1k pages are not supported. */
1568
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1569
        tag = env->CP0_EntryHi & ~mask;
1570
        VPN = tlb->VPN & ~mask;
1571
        /* Check ASID, virtual page number & size */
1572
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1573
            /* TLB match */
1574
            env->CP0_Index = i;
1575
            break;
1576
        }
1577
    }
1578
    if (i == env->tlb->nb_tlb) {
1579
        /* No match.  Discard any shadow entries, if any of them match.  */
1580
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
1581
            tlb = &env->tlb->mmu.r4k.tlb[i];
1582
            /* 1k pages are not supported. */
1583
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1584
            tag = env->CP0_EntryHi & ~mask;
1585
            VPN = tlb->VPN & ~mask;
1586
            /* Check ASID, virtual page number & size */
1587
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1588
                r4k_mips_tlb_flush_extra (env, i);
1589
                break;
1590
            }
1591
        }
1592

    
1593
        env->CP0_Index |= 0x80000000;
1594
    }
1595
}
1596

    
1597
void r4k_helper_tlbr (void)
1598
{
1599
    r4k_tlb_t *tlb;
1600
    uint8_t ASID;
1601
    int idx;
1602

    
1603
    ASID = env->CP0_EntryHi & 0xFF;
1604
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
1605
    tlb = &env->tlb->mmu.r4k.tlb[idx];
1606

    
1607
    /* If this will change the current ASID, flush qemu's TLB.  */
1608
    if (ASID != tlb->ASID)
1609
        cpu_mips_tlb_flush (env, 1);
1610

    
1611
    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
1612

    
1613
    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
1614
    env->CP0_PageMask = tlb->PageMask;
1615
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
1616
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
1617
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
1618
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
1619
}
1620

    
1621
void helper_tlbwi(void)
1622
{
1623
    env->tlb->helper_tlbwi();
1624
}
1625

    
1626
void helper_tlbwr(void)
1627
{
1628
    env->tlb->helper_tlbwr();
1629
}
1630

    
1631
void helper_tlbp(void)
1632
{
1633
    env->tlb->helper_tlbp();
1634
}
1635

    
1636
void helper_tlbr(void)
1637
{
1638
    env->tlb->helper_tlbr();
1639
}
1640

    
1641
/* Specials */
1642
target_ulong helper_di (void)
1643
{
1644
    target_ulong t0 = env->CP0_Status;
1645

    
1646
    env->CP0_Status = t0 & ~(1 << CP0St_IE);
1647
    cpu_mips_update_irq(env);
1648

    
1649
    return t0;
1650
}
1651

    
1652
target_ulong helper_ei (void)
1653
{
1654
    target_ulong t0 = env->CP0_Status;
1655

    
1656
    env->CP0_Status = t0 | (1 << CP0St_IE);
1657
    cpu_mips_update_irq(env);
1658

    
1659
    return t0;
1660
}
1661

    
1662
static void debug_pre_eret (void)
1663
{
1664
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1665
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1666
                env->active_tc.PC, env->CP0_EPC);
1667
        if (env->CP0_Status & (1 << CP0St_ERL))
1668
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1669
        if (env->hflags & MIPS_HFLAG_DM)
1670
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1671
        qemu_log("\n");
1672
    }
1673
}
1674

    
1675
static void debug_post_eret (void)
1676
{
1677
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1678
        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1679
                env->active_tc.PC, env->CP0_EPC);
1680
        if (env->CP0_Status & (1 << CP0St_ERL))
1681
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1682
        if (env->hflags & MIPS_HFLAG_DM)
1683
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1684
        switch (env->hflags & MIPS_HFLAG_KSU) {
1685
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1686
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1687
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
1688
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1689
        }
1690
    }
1691
}
1692

    
1693
void helper_eret (void)
1694
{
1695
    debug_pre_eret();
1696
    if (env->CP0_Status & (1 << CP0St_ERL)) {
1697
        env->active_tc.PC = env->CP0_ErrorEPC;
1698
        env->CP0_Status &= ~(1 << CP0St_ERL);
1699
    } else {
1700
        env->active_tc.PC = env->CP0_EPC;
1701
        env->CP0_Status &= ~(1 << CP0St_EXL);
1702
    }
1703
    compute_hflags(env);
1704
    debug_post_eret();
1705
    env->CP0_LLAddr = 1;
1706
}
1707

    
1708
void helper_deret (void)
1709
{
1710
    debug_pre_eret();
1711
    env->active_tc.PC = env->CP0_DEPC;
1712
    env->hflags &= MIPS_HFLAG_DM;
1713
    compute_hflags(env);
1714
    debug_post_eret();
1715
    env->CP0_LLAddr = 1;
1716
}
1717
#endif /* !CONFIG_USER_ONLY */
1718

    
1719
target_ulong helper_rdhwr_cpunum(void)
1720
{
1721
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1722
        (env->CP0_HWREna & (1 << 0)))
1723
        return env->CP0_EBase & 0x3ff;
1724
    else
1725
        helper_raise_exception(EXCP_RI);
1726

    
1727
    return 0;
1728
}
1729

    
1730
target_ulong helper_rdhwr_synci_step(void)
1731
{
1732
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1733
        (env->CP0_HWREna & (1 << 1)))
1734
        return env->SYNCI_Step;
1735
    else
1736
        helper_raise_exception(EXCP_RI);
1737

    
1738
    return 0;
1739
}
1740

    
1741
target_ulong helper_rdhwr_cc(void)
1742
{
1743
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1744
        (env->CP0_HWREna & (1 << 2)))
1745
        return env->CP0_Count;
1746
    else
1747
        helper_raise_exception(EXCP_RI);
1748

    
1749
    return 0;
1750
}
1751

    
1752
target_ulong helper_rdhwr_ccres(void)
1753
{
1754
    if ((env->hflags & MIPS_HFLAG_CP0) ||
1755
        (env->CP0_HWREna & (1 << 3)))
1756
        return env->CCRes;
1757
    else
1758
        helper_raise_exception(EXCP_RI);
1759

    
1760
    return 0;
1761
}
1762

    
1763
void helper_pmon (int function)
1764
{
1765
    function /= 2;
1766
    switch (function) {
1767
    case 2: /* TODO: char inbyte(int waitflag); */
1768
        if (env->active_tc.gpr[4] == 0)
1769
            env->active_tc.gpr[2] = -1;
1770
        /* Fall through */
1771
    case 11: /* TODO: char inbyte (void); */
1772
        env->active_tc.gpr[2] = -1;
1773
        break;
1774
    case 3:
1775
    case 12:
1776
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
1777
        break;
1778
    case 17:
1779
        break;
1780
    case 158:
1781
        {
1782
            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
1783
            printf("%s", fmt);
1784
        }
1785
        break;
1786
    }
1787
}
1788

    
1789
void helper_wait (void)
1790
{
1791
    env->halted = 1;
1792
    helper_raise_exception(EXCP_HLT);
1793
}
1794

    
1795
#if !defined(CONFIG_USER_ONLY)
1796

    
1797
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
1798

    
1799
#define MMUSUFFIX _mmu
1800
#define ALIGNED_ONLY
1801

    
1802
#define SHIFT 0
1803
#include "softmmu_template.h"
1804

    
1805
#define SHIFT 1
1806
#include "softmmu_template.h"
1807

    
1808
#define SHIFT 2
1809
#include "softmmu_template.h"
1810

    
1811
#define SHIFT 3
1812
#include "softmmu_template.h"
1813

    
1814
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
1815
{
1816
    env->CP0_BadVAddr = addr;
1817
    do_restore_state (retaddr);
1818
    helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
1819
}
1820

    
1821
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int unused, int size)
{
    if (is_exec)
        helper_raise_exception(EXCP_IBE);
    else
        helper_raise_exception(EXCP_DBE);
}
#endif /* !CONFIG_USER_ONLY */

/* Complex FPU operations which may need stack space. */

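/* Raw IEEE-754 encodings: 0x3f800000 / 0x3ff0000000000000 encode 1.0 and
   0x40000000 / 0x4000000000000000 encode 2.0.  The *_QNAN and *_SNAN
   patterns are the fixed results substituted below when an operation or
   conversion sets the Invalid/Overflow cause bits.  */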
#define FLOAT_ONE32 make_float32(0x3f8 << 20)
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
#define FLOAT_TWO32 make_float32(1 << 30)
#define FLOAT_TWO64 make_float64(1ULL << 62)
#define FLOAT_QNAN32 0x7fbfffff
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
#define FLOAT_SNAN32 0x7fffffff
#define FLOAT_SNAN64 0x7fffffffffffffffULL

/* convert MIPS rounding mode in FCR31 to IEEE library */
static unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};

#define RESTORE_ROUNDING_MODE \
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)

#define RESTORE_FLUSH_MODE \
    set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);

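/* CFC1/CTC1: registers 25 (FCCR), 26 (FEXR) and 28 (FENR) are partial
   views of FCR31 (condition codes, cause/flags, and enables/FS
   respectively); register 31 is the full FCSR and register 0 the
   read-only FIR.  */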
target_ulong helper_cfc1 (uint32_t reg)
{
    target_ulong arg1;

    switch (reg) {
    case 0:
        arg1 = (int32_t)env->active_fpu.fcr0;
        break;
    case 25:
        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
        break;
    case 26:
        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
        break;
    case 28:
        arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
        break;
    default:
        arg1 = (int32_t)env->active_fpu.fcr31;
        break;
    }

    return arg1;
}

void helper_ctc1 (target_ulong arg1, uint32_t reg)
{
    switch(reg) {
    case 25:
        if (arg1 & 0xffffff00)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
                     ((arg1 & 0x1) << 23);
        break;
    case 26:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
        break;
    case 28:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
                     ((arg1 & 0x4) << 22);
        break;
    case 31:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = arg1;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    /* set flush-to-zero mode */
    RESTORE_FLUSH_MODE;
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
        helper_raise_exception(EXCP_FPE);
}

static inline char ieee_ex_to_mips(char xcpt)
{
    return (xcpt & float_flag_inexact) >> 5 |
           (xcpt & float_flag_underflow) >> 3 |
           (xcpt & float_flag_overflow) >> 1 |
           (xcpt & float_flag_divbyzero) << 1 |
           (xcpt & float_flag_invalid) << 4;
}

static inline char mips_ex_to_ieee(char xcpt)
{
    return (xcpt & FP_INEXACT) << 5 |
           (xcpt & FP_UNDERFLOW) << 3 |
           (xcpt & FP_OVERFLOW) << 1 |
           (xcpt & FP_DIV0) >> 1 |
           (xcpt & FP_INVALID) >> 4;
}

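/* Fold the softfloat exception flags accumulated since the last
   set_float_exception_flags(0, ...) into the FCR31 cause field.  If any
   of them is enabled, raise the FP exception; otherwise accumulate the
   bits into the sticky flags field.  */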
static inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));

    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
        helper_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
}

/* Float support.
   Single precision routines have an "s" suffix, double precision a
   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
   paired single lower "pl", paired single upper "pu".  */

/* unary operations, modifying fp status  */
uint64_t helper_float_sqrt_d(uint64_t fdt0)
{
    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
}

uint32_t helper_float_sqrt_s(uint32_t fst0)
{
    return float32_sqrt(fst0, &env->active_fpu.fp_status);
}

uint64_t helper_float_cvtd_s(uint32_t fst0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t helper_float_cvtd_w(uint32_t wt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t helper_float_cvtd_l(uint64_t dt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint64_t helper_float_cvtl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_cvtl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_cvtps_pw(uint64_t dt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
{
    uint32_t wt2;
    uint32_t wth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        wt2 = FLOAT_SNAN32;
        wth2 = FLOAT_SNAN32;
    }
    return ((uint64_t)wth2 << 32) | wt2;
}

uint32_t helper_float_cvts_d(uint64_t fdt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t helper_float_cvts_w(uint32_t wt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t helper_float_cvts_l(uint64_t dt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint32_t helper_float_cvts_pl(uint32_t wt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wt0;
    update_fcr31();
    return wt2;
}

uint32_t helper_float_cvts_pu(uint32_t wth0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wth0;
    update_fcr31();
    return wt2;
}

uint32_t helper_float_cvtw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_cvtw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t helper_float_roundl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_roundl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_roundw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_roundw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t helper_float_truncl_d(uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_truncl_s(uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_truncw_d(uint64_t fdt0)
{
    uint32_t wt2;

    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_truncw_s(uint32_t fst0)
{
    uint32_t wt2;

    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t helper_float_ceill_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_ceill_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_ceilw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_ceilw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint64_t helper_float_floorl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint64_t helper_float_floorl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

uint32_t helper_float_floorw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

uint32_t helper_float_floorw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

/* unary operations, not modifying fp status  */
#define FLOAT_UNOP(name)                                       \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)                \
{                                                              \
    return float64_ ## name(fdt0);                             \
}                                                              \
uint32_t helper_float_ ## name ## _s(uint32_t fst0)                \
{                                                              \
    return float32_ ## name(fst0);                             \
}                                                              \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)               \
{                                                              \
    uint32_t wt0;                                              \
    uint32_t wth0;                                             \
                                                               \
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                 \
    wth0 = float32_ ## name(fdt0 >> 32);                       \
    return ((uint64_t)wth0 << 32) | wt0;                       \
}
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP

/* MIPS specific unary operations */
uint64_t helper_float_recip_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_recip_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t helper_float_rsqrt_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_rsqrt_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t helper_float_recip1_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_recip1_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t helper_float_recip1_ps(uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_rsqrt1_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)

/* binary operations */
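/* FLOAT_BINOP expands each arithmetic operation into three helpers
   (double, single and paired-single).  Each clears the softfloat flags,
   performs the operation, folds the flags into FCR31 and substitutes the
   default quiet NaN when the Invalid cause bit was set.  */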
#define FLOAT_BINOP(name)                                          \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)     \
{                                                                  \
    uint64_t dt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
        dt2 = FLOAT_QNAN64;                                        \
    return dt2;                                                    \
}                                                                  \
                                                                   \
uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)     \
{                                                                  \
    uint32_t wt2;                                                  \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
        wt2 = FLOAT_QNAN32;                                        \
    return wt2;                                                    \
}                                                                  \
                                                                   \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)    \
{                                                                  \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
    uint32_t fsth0 = fdt0 >> 32;                                   \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
    uint32_t fsth1 = fdt1 >> 32;                                   \
    uint32_t wt2;                                                  \
    uint32_t wth2;                                                 \
                                                                   \
    set_float_exception_flags(0, &env->active_fpu.fp_status);            \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);  \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {              \
        wt2 = FLOAT_QNAN32;                                        \
        wth2 = FLOAT_QNAN32;                                       \
    }                                                              \
    return ((uint64_t)wth2 << 32) | wt2;                           \
}

FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP

/* ternary operations */
#define FLOAT_TERNOP(name1, name2)                                        \
uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
}                                                                         \
                                                                          \
uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
}                                                                         \
                                                                          \
uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
                                            uint64_t fdt2)                \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}

FLOAT_TERNOP(mul, add)
FLOAT_TERNOP(mul, sub)
#undef FLOAT_TERNOP

/* negated ternary operations */
#define FLOAT_NTERNOP(name1, name2)                                       \
uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                           uint64_t fdt2)                 \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
    return float64_chs(fdt2);                                             \
}                                                                         \
                                                                          \
uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                           uint32_t fst2)                 \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    return float32_chs(fst2);                                             \
}                                                                         \
                                                                          \
uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
                                           uint64_t fdt2)                 \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
    fst2 = float32_chs(fst2);                                             \
    fsth2 = float32_chs(fsth2);                                           \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}

FLOAT_NTERNOP(mul, add)
FLOAT_NTERNOP(mul, sub)
#undef FLOAT_NTERNOP

/* MIPS specific binary operations */
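/* recip2 and rsqrt2 are the refinement steps of the reduced-precision
   reciprocal and reciprocal square root sequences: recip2 computes
   -(fs * ft - 1.0) and rsqrt2 computes -(fs * ft - 1.0) / 2.0.  */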
uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
    update_fcr31();
    return fst2;
}

uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
    update_fcr31();
    return fdt2;
}

uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
    update_fcr31();
    return fst2;
}

uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
    uint32_t fsth1 = fdt1 >> 32;
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
    fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
    uint32_t fsth1 = fdt1 >> 32;
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
    fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

/* compare operations */
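/* The helpers below implement the c.cond.fmt and cabs.cond.fmt compare
   instructions: each macro expands into a pair of helpers that evaluate
   the condition (the cmpabs variants compare absolute values) and set or
   clear FP condition code cc accordingly.  */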
#define FOP_COND_D(op, cond)                                   \
void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                              \
    int c = cond;                                              \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                              \
    int c;                                                     \
    fdt0 = float64_abs(fdt0);                                  \
    fdt1 = float64_abs(fdt1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

static int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
{
    if (float64_is_signaling_nan(a) ||
        float64_is_signaling_nan(b) ||
        (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float64_is_nan(a) || float64_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(f,   (float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(un,  float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(eq,  !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ueq, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(olt, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ult, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ole, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ule, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(sf,  (float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(ngle,float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(seq, !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngl, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(lt,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(nge, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(le,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngt, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))

#define FOP_COND_S(op, cond)                                   \
void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
{                                                              \
    int c = cond;                                              \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
{                                                              \
    int c;                                                     \
    fst0 = float32_abs(fst0);                                  \
    fst1 = float32_abs(fst1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

static flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
{
    if (float32_is_signaling_nan(a) ||
        float32_is_signaling_nan(b) ||
        (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float32_is_nan(a) || float32_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))

#define FOP_COND_PS(op, condl, condh)                           \
void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                               \
    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
    int cl = condl;                                             \
    int ch = condh;                                             \
                                                                \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}                                                               \
void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                               \
    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
    uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
    uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
    int cl = condl;                                             \
    int ch = condh;                                             \
                                                                \
    update_fcr31();                                             \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))