/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include <string.h>
#include "exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"

//#define DEBUG_OP
//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}

/*****************************************************************************/
/* Registers load and stores */
target_ulong helper_load_cr (void)
{
    return (env->crf[0] << 28) |
           (env->crf[1] << 24) |
           (env->crf[2] << 20) |
           (env->crf[3] << 16) |
           (env->crf[4] << 12) |
           (env->crf[5] << 8) |
           (env->crf[6] << 4) |
           (env->crf[7] << 0);
}

void helper_store_cr (target_ulong val, uint32_t mask)
{
    int i, sh;

    for (i = 0, sh = 7; i < 8; i++, sh--) {
        if (mask & (1 << sh))
            env->crf[i] = (val >> (sh * 4)) & 0xFUL;
    }
}
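/* Illustrative note (not in the original source): crf[0] holds CR field 0,
 * the most-significant nibble of the architected CR, so mask bit 7 selects
 * it. For example, a hypothetical call helper_store_cr(0x20000000, 0x80)
 * would set crf[0] to 0x2 (CR0[EQ]) and leave crf[1]..crf[7] unchanged.
 */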

/*****************************************************************************/
/* SPR accesses */
void helper_load_dump_spr (uint32_t sprn)
{
    if (loglevel != 0) {
        fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
                sprn, sprn, env->spr[sprn]);
    }
}

void helper_store_dump_spr (uint32_t sprn)
{
    if (loglevel != 0) {
        fprintf(logfile, "Write SPR %d %03x <= " ADDRX "\n",
                sprn, sprn, env->spr[sprn]);
    }
}

target_ulong helper_load_tbl (void)
{
    return cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}

#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        if (loglevel != 0) {
            fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
                    __func__, val & 0x8 ? 'l' : 'b', env->hflags);
        }
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif

/*****************************************************************************/
/* Memory load and stores */

static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
        if (!msr_sf)
            return (uint32_t)(addr + arg);
        else
#endif
            return addr + arg;
}
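/* Illustrative note (not in the original source): in 32-bit mode (MSR[SF]
 * clear on a 64-bit CPU, or any 32-bit target), addr_add() wraps effective
 * addresses modulo 2^32, e.g. addr_add(0xFFFFFFFF, 1) yields 0 rather than
 * 0x100000000.
 */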

void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            env->gpr[reg] = bswap32(ldl(addr));
        else
            env->gpr[reg] = ldl(addr);
        addr = addr_add(addr, 4);
    }
}

void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        else
            stl(addr, (uint32_t)env->gpr[reg]);
        addr = addr_add(addr, 4);
    }
}

void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}
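/* Illustrative note (not in the original source): the final partial word is
 * packed into the high-order bytes of the destination GPR. For instance,
 * lsw with nb == 2 and memory bytes 0xAA 0xBB would leave gpr[reg] equal to
 * 0xAABB0000, with the low bytes cleared.
 */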
/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}

void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}

static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        stl(addr + i, 0);
    }
    if (env->reserve == addr)
        env->reserve = (target_ulong)-1ULL;
}
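/* Illustrative note (not in the original source): the effective address is
 * rounded down to a cache-line boundary before clearing, e.g. with a
 * 32-byte line, do_dcbz(0x1010, 32) zeroes bytes 0x1000..0x101F and cancels
 * an lwarx reservation held at that aligned address.
 */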

void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}

void helper_icbi(target_ulong addr)
{
    uint32_t tmp;

    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line:
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    tmp = ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}

// XXX: to be tested
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;
    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
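/* Illustrative note (not in the original source): the transfer ends early
 * once a loaded byte equals the XER[CMP] byte, and ra (when non-zero) and
 * rb are never overwritten; the value returned is the number of completed
 * iterations, which the generated code is expected to fold back into XER.
 */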

/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif

target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif

/* shift right arithmetic helper */
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}
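/* Illustrative note (not in the original source): XER[CA] is set only when
 * the source is negative and non-zero bits are shifted out. For example,
 * sraw(0xFFFFFFF1, 1) (-15 >> 1) returns -8 and sets CA, while
 * sraw(0x0000000F, 1) returns 7 with CA clear.
 */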

#if defined(TARGET_PPC64)
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif

target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}
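/* Illustrative note (not in the original source): the three masked add
 * steps above accumulate a separate population count in every byte, so
 * popcntb(0x01020304) yields 0x01010201 (one set bit in 0x01, 0x02 and
 * 0x04, two in 0x03).
 */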

#if defined(TARGET_PPC64)
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif

/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static always_inline int isden (float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}

uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
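/* Illustrative note (not in the original source): the 5-bit FPRF encodings
 * produced above follow the PowerPC class/sign layout, e.g. 0x02 for +0.0,
 * 0x12 for -0.0, 0x05 for +infinity, 0x09 for -infinity and 0x11 for a
 * quiet NaN.
 */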

/* Floating-point invalid operations exception */
static always_inline uint64_t fload_invalid_op_excp (int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}

static always_inline void float_zero_divide_excp (void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

static always_inline void float_overflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static always_inline void float_underflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static always_inline void float_inexact_excp (void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

static always_inline void fpscr_set_rounding_mode (void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}

void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}

void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else
        env->fpscr &= ~(1 << FPSCR_FEX);
    fpscr_set_rounding_mode();
}
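/* Illustrative note (not in the original source): each set bit i of 'mask'
 * copies the corresponding 4-bit FPSCR nibble (bits 4*i .. 4*i+3) from the
 * source value, so a hypothetical helper_store_fpscr(arg, 0x01) would
 * update only the low nibble, which includes the RN rounding-control field.
 */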

void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}

#ifdef CONFIG_SOFTFLOAT
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif

/* fadd - fadd. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                      float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
{
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN subtraction */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                      float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }
}
#else
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fabs */
uint64_t helper_fabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    return farg.ll;
}

/* fnabs */
uint64_t helper_fnabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fneg */
uint64_t helper_fneg (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fctiw - fctiw. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}

#endif

static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}

uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
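/* Illustrative note (not in the original source): these four helpers differ
 * only in the rounding mode passed to do_fri(), e.g. frin(2.5) rounds to
 * 2.0 (nearest even), friz(2.7) to 2.0, frip(2.1) to 3.0 and frim(-0.5) to
 * -1.0; the FPSCR rounding mode is restored afterwards.
 */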

/* fmadd - fmadd. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmsub - fmsub. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fnmadd - fnmadd. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}

/* fnmsub - fnmsub. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}

/* frsp - frsp. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN rounding to single precision */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
#else
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);
#endif
    return farg.ll;
}

/* fsqrt - fsqrt. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fre - fre. */
uint64_t helper_fre (uint64_t arg)
{
    CPU_DoubleU fone, farg;
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fres - fres. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU fone, farg;
    float32 f32;
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* frsqrte  - frsqrte. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU fone, farg;
    float32 f32;
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* fsel - fsel. */
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
        return arg2;
    else
        return arg3;
}
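/* Illustrative note (not in the original source): fsel picks arg2 when arg1
 * is greater than or equal to zero (either sign of zero qualifies) and is
 * not a NaN, otherwise arg3; e.g. fsel(-0.0, a, b) selects a while
 * fsel(NaN, a, b) selects b.
 */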

void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}
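/* Illustrative note (not in the original source): the 4-bit comparison
 * result written to FPSCR[FPRF] and CR[crfD] is 0x8 for "less than", 0x4
 * for "greater than", 0x2 for "equal" and 0x1 for "unordered" (at least one
 * NaN operand).
 */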

void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}

#if !defined (CONFIG_USER_ONLY)
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}

static always_inline void do_rfi (target_ulong nip, target_ulong msr,
                                    target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}

void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0xFFFF0000), 1);
}

#if defined(TARGET_PPC64)
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0xFFFF0000), 0);
}

void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0xFFFF0000), 0);
}
#endif
#endif

void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1704

    
1705
#if defined(TARGET_PPC64)
1706
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
1707
{
1708
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1709
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1710
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1711
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1712
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
1713
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1714
}
1715
#endif
1716

    
1717
/*****************************************************************************/
1718
/* PowerPC 601 specific instructions (POWER bridge) */
1719

    
1720
target_ulong helper_clcs (uint32_t arg)
1721
{
1722
    switch (arg) {
1723
    case 0x0CUL:
1724
        /* Instruction cache line size */
1725
        return env->icache_line_size;
1726
        break;
1727
    case 0x0DUL:
1728
        /* Data cache line size */
1729
        return env->dcache_line_size;
1730
        break;
1731
    case 0x0EUL:
1732
        /* Minimum cache line size */
1733
        return (env->icache_line_size < env->dcache_line_size) ?
1734
                env->icache_line_size : env->dcache_line_size;
1735
        break;
1736
    case 0x0FUL:
1737
        /* Maximum cache line size */
1738
        return (env->icache_line_size > env->dcache_line_size) ?
1739
                env->icache_line_size : env->dcache_line_size;
1740
        break;
1741
    default:
1742
        /* Undefined */
1743
        return 0;
1744
        break;
1745
    }
1746
}
1747

    
1748
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
1749
{
1750
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1751

    
1752
    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1753
        (int32_t)arg2 == 0) {
1754
        env->spr[SPR_MQ] = 0;
1755
        return INT32_MIN;
1756
    } else {
1757
        env->spr[SPR_MQ] = tmp % arg2;
1758
        return  tmp / (int32_t)arg2;
1759
    }
1760
}
1761

    
1762
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
1763
{
1764
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1765

    
1766
    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1767
        (int32_t)arg2 == 0) {
1768
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
1769
        env->spr[SPR_MQ] = 0;
1770
        return INT32_MIN;
1771
    } else {
1772
        env->spr[SPR_MQ] = tmp % arg2;
1773
        tmp /= (int32_t)arg2;
1774
        if ((int32_t)tmp != tmp) {
1775
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
1776
        } else {
1777
            env->xer &= ~(1 << XER_OV);
1778
        }
1779
        return tmp;
1780
    }
1781
}
1782

    
1783
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
1784
{
1785
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1786
        (int32_t)arg2 == 0) {
1787
        env->spr[SPR_MQ] = 0;
1788
        return INT32_MIN;
1789
    } else {
1790
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1791
        return (int32_t)arg1 / (int32_t)arg2;
1792
    }
1793
}
1794

    
1795
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
1796
{
1797
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1798
        (int32_t)arg2 == 0) {
1799
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
1800
        env->spr[SPR_MQ] = 0;
1801
        return INT32_MIN;
1802
    } else {
1803
        env->xer &= ~(1 << XER_OV);
1804
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1805
        return (int32_t)arg1 / (int32_t)arg2;
1806
    }
1807
}
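
/* The four division helpers above keep the MQ-based semantics of the
 * original POWER ISA: div/divo divide the 64-bit value (RA << 32) | MQ by
 * RB, while divs/divso divide RA directly.  In all cases the remainder is
 * left in MQ, and the "o" variants set XER[OV] and XER[SO] on overflow or
 * division by zero.
 */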
1808

    
1809
#if !defined (CONFIG_USER_ONLY)
1810
target_ulong helper_rac (target_ulong addr)
1811
{
1812
    mmu_ctx_t ctx;
1813
    int nb_BATs;
1814
    target_ulong ret = 0;
1815

    
1816
    /* We don't have to generate many instances of this instruction,
1817
     * as rac is supervisor only.
1818
     */
1819
    /* XXX: FIX THIS: Pretend we have no BAT */
1820
    nb_BATs = env->nb_BATs;
1821
    env->nb_BATs = 0;
1822
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
1823
        ret = ctx.raddr;
1824
    env->nb_BATs = nb_BATs;
1825
    return ret;
1826
}
1827

    
1828
void helper_rfsvc (void)
1829
{
1830
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1831
}
1832
#endif
1833

    
1834
/*****************************************************************************/
1835
/* 602 specific instructions */
1836
/* mfrom is the most crazy instruction ever seen, imho ! */
1837
/* Real implementation uses a ROM table. Do the same */
1838
/* Extremely decomposed:
1839
 * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
1841
 */
1842
#if !defined (CONFIG_USER_ONLY)
1843
target_ulong helper_602_mfrom (target_ulong arg)
1844
{
1845
    if (likely(arg < 602)) {
1846
#include "mfrom_table.c"
1847
        return mfrom_ROM_table[arg];
1848
    } else {
1849
        return 0;
1850
    }
1851
}
1852
#endif
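
/* For reference, a table consistent with the formula above could be
 * generated offline with something along these lines (illustrative sketch
 * only; the authoritative values are the ones in the included
 * mfrom_table.c):
 *
 *     #include <math.h>
 *     #include <stdio.h>
 *
 *     int main (void)
 *     {
 *         int i;
 *
 *         for (i = 0; i < 602; i++) {
 *             printf("%5u,\n", (unsigned int)
 *                    (256.0 * log10(pow(10.0, -i / 256.0) + 1.0) + 0.5));
 *         }
 *         return 0;
 *     }
 */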
1853

    
1854
/*****************************************************************************/
1855
/* Embedded PowerPC specific helpers */
1856

    
1857
/* XXX: to be improved to check access rights when in user-mode */
1858
target_ulong helper_load_dcr (target_ulong dcrn)
1859
{
1860
    target_ulong val = 0;
1861

    
1862
    if (unlikely(env->dcr_env == NULL)) {
1863
        if (loglevel != 0) {
1864
            fprintf(logfile, "No DCR environment\n");
1865
        }
1866
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1867
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1868
    } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
1869
        if (loglevel != 0) {
1870
            fprintf(logfile, "DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
1871
        }
1872
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1873
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1874
    }
1875
    return val;
1876
}
1877

    
1878
void helper_store_dcr (target_ulong dcrn, target_ulong val)
1879
{
1880
    if (unlikely(env->dcr_env == NULL)) {
1881
        if (loglevel != 0) {
1882
            fprintf(logfile, "No DCR environment\n");
1883
        }
1884
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1885
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1886
    } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
1887
        if (loglevel != 0) {
1888
            fprintf(logfile, "DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
1889
        }
1890
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1891
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1892
    }
1893
}
1894

    
1895
#if !defined(CONFIG_USER_ONLY)
1896
void helper_40x_rfci (void)
1897
{
1898
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1899
           ~((target_ulong)0xFFFF0000), 0);
1900
}
1901

    
1902
void helper_rfci (void)
1903
{
1904
    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
1905
           ~((target_ulong)0x3FFF0000), 0);
1906
}
1907

    
1908
void helper_rfdi (void)
1909
{
1910
    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
1911
           ~((target_ulong)0x3FFF0000), 0);
1912
}
1913

    
1914
void helper_rfmci (void)
1915
{
1916
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
1917
           ~((target_ulong)0x3FFF0000), 0);
1918
}
1919
#endif
1920

    
1921
/* 440 specific */
1922
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
1923
{
1924
    target_ulong mask;
1925
    int i;
1926

    
1927
    i = 1;
1928
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1929
        if ((high & mask) == 0) {
1930
            if (update_Rc) {
1931
                env->crf[0] = 0x4;
1932
            }
1933
            goto done;
1934
        }
1935
        i++;
1936
    }
1937
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1938
        if ((low & mask) == 0) {
1939
            if (update_Rc) {
1940
                env->crf[0] = 0x8;
1941
            }
1942
            goto done;
1943
        }
1944
        i++;
1945
    }
1946
    if (update_Rc) {
1947
        env->crf[0] = 0x2;
1948
    }
1949
 done:
1950
    env->xer = (env->xer & ~0x7F) | i;
1951
    if (update_Rc) {
1952
        env->crf[0] |= xer_so;
1953
    }
1954
    return i;
1955
}
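
/* dlmzb scans the eight bytes formed by RS (high) followed by RB (low) for
 * the first zero byte and returns its 1-based index, which is also written
 * into the low bits of XER.  When Rc is set, CR0 records where the zero
 * byte was found: 0x4 for the high word, 0x8 for the low word, 0x2 if none.
 * Example: high = 0x41424344, low = 0x45004748 ("ABCDE\0GH") yields 6.
 */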
1956

    
1957
/*****************************************************************************/
1958
/* Altivec extension helpers */
1959
#if defined(WORDS_BIGENDIAN)
1960
#define HI_IDX 0
1961
#define LO_IDX 1
1962
#else
1963
#define HI_IDX 1
1964
#define LO_IDX 0
1965
#endif
1966

    
1967
#if defined(WORDS_BIGENDIAN)
1968
#define VECTOR_FOR_INORDER_I(index, element)            \
1969
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
1970
#else
1971
#define VECTOR_FOR_INORDER_I(index, element)            \
1972
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
1973
#endif
1974

    
1975
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
1976
{
1977
    int i, j = (sh & 0xf);
1978

    
1979
    VECTOR_FOR_INORDER_I (i, u8) {
1980
        r->u8[i] = j++;
1981
    }
1982
}
1983

    
1984
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
1985
{
1986
    int i, j = 0x10 - (sh & 0xf);
1987

    
1988
    VECTOR_FOR_INORDER_I (i, u8) {
1989
        r->u8[i] = j++;
1990
    }
1991
}
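
/* lvsl/lvsr build the permute control vector that vperm consumes for
 * misaligned accesses: consecutive byte values starting at (addr & 0xf) for
 * lvsl and at 0x10 - (addr & 0xf) for lvsr.  For sh == 3 this gives
 * { 0x03, 0x04, ..., 0x12 } and { 0x0d, 0x0e, ..., 0x1c }.  The classic
 * misaligned-load idiom (sketch only) is:
 *
 *     hi = lvx(addr); lo = lvx(addr + 16); perm = lvsl(addr);
 *     result = vperm(hi, lo, perm);
 */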
1992

    
1993
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1994
{
1995
    int i;
1996
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
1997
        r->u32[i] = ~a->u32[i] < b->u32[i];
1998
    }
1999
}
2000

    
2001
#define VARITH_DO(name, op, element)        \
2002
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
2003
{                                                                       \
2004
    int i;                                                              \
2005
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
2006
        r->element[i] = a->element[i] op b->element[i];                 \
2007
    }                                                                   \
2008
}
2009
#define VARITH(suffix, element)                  \
2010
  VARITH_DO(add##suffix, +, element)             \
2011
  VARITH_DO(sub##suffix, -, element)
2012
VARITH(ubm, u8)
2013
VARITH(uhm, u16)
2014
VARITH(uwm, u32)
2015
#undef VARITH_DO
2016
#undef VARITH
2017

    
2018
#define VAVG_DO(name, element, etype)                                   \
2019
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2020
    {                                                                   \
2021
        int i;                                                          \
2022
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2023
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
2024
            r->element[i] = x >> 1;                                     \
2025
        }                                                               \
2026
    }
2027

    
2028
#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2029
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
2030
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2031
VAVG(b, s8, int16_t, u8, uint16_t)
2032
VAVG(h, s16, int32_t, u16, uint32_t)
2033
VAVG(w, s32, int64_t, u32, uint64_t)
2034
#undef VAVG_DO
2035
#undef VAVG
2036

    
2037
#define VMINMAX_DO(name, compare, element)                              \
2038
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2039
    {                                                                   \
2040
        int i;                                                          \
2041
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2042
            if (a->element[i] compare b->element[i]) {                  \
2043
                r->element[i] = b->element[i];                          \
2044
            } else {                                                    \
2045
                r->element[i] = a->element[i];                          \
2046
            }                                                           \
2047
        }                                                               \
2048
    }
2049
#define VMINMAX(suffix, element)                \
2050
  VMINMAX_DO(min##suffix, >, element)           \
2051
  VMINMAX_DO(max##suffix, <, element)
2052
VMINMAX(sb, s8)
2053
VMINMAX(sh, s16)
2054
VMINMAX(sw, s32)
2055
VMINMAX(ub, u8)
2056
VMINMAX(uh, u16)
2057
VMINMAX(uw, u32)
2058
#undef VMINMAX_DO
2059
#undef VMINMAX
2060

    
2061
#define VMRG_DO(name, element, highp)                                   \
2062
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2063
    {                                                                   \
2064
        ppc_avr_t result;                                               \
2065
        int i;                                                          \
2066
        size_t n_elems = ARRAY_SIZE(r->element);                        \
2067
        for (i = 0; i < n_elems/2; i++) {                               \
2068
            if (highp) {                                                \
2069
                result.element[i*2+HI_IDX] = a->element[i];             \
2070
                result.element[i*2+LO_IDX] = b->element[i];             \
2071
            } else {                                                    \
2072
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2073
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2074
            }                                                           \
2075
        }                                                               \
2076
        *r = result;                                                    \
2077
    }
2078
#if defined(WORDS_BIGENDIAN)
2079
#define MRGHI 0
2080
#define MRGLO 1
2081
#else
2082
#define MRGHI 1
2083
#define MRGLO 0
2084
#endif
2085
#define VMRG(suffix, element)                   \
2086
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
2087
  VMRG_DO(mrgh##suffix, element, MRGLO)
2088
VMRG(b, u8)
2089
VMRG(h, u16)
2090
VMRG(w, u32)
2091
#undef VMRG_DO
2092
#undef VMRG
2093
#undef MRGHI
2094
#undef MRGLO
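
/* Architecturally, vmrgh{b,h,w} interleaves the elements of the high halves
 * of A and B (A0, B0, A1, B1, ... in big-endian element order) and
 * vmrgl{b,h,w} does the same with the low halves; the MRGHI/MRGLO constants
 * combined with HI_IDX/LO_IDX let the single VMRG_DO body produce both
 * variants on either host endianness.
 */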
2095

    
2096
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2097
{
2098
    int32_t prod[16];
2099
    int i;
2100

    
2101
    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
2102
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
2103
    }
2104

    
2105
    VECTOR_FOR_INORDER_I(i, s32) {
2106
        r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2107
    }
2108
}
2109

    
2110
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2111
{
2112
    uint16_t prod[16];
2113
    int i;
2114

    
2115
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2116
        prod[i] = a->u8[i] * b->u8[i];
2117
    }
2118

    
2119
    VECTOR_FOR_INORDER_I(i, u32) {
2120
        r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2121
    }
2122
}
2123

    
2124
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
2125
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2126
    {                                                                   \
2127
        int i;                                                          \
2128
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
2129
            if (evenp) {                                                \
2130
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2131
            } else {                                                    \
2132
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2133
            }                                                           \
2134
        }                                                               \
2135
    }
2136
#define VMUL(suffix, mul_element, prod_element) \
2137
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2138
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2139
VMUL(sb, s8, s16)
2140
VMUL(sh, s16, s32)
2141
VMUL(ub, u8, u16)
2142
VMUL(uh, u16, u32)
2143
#undef VMUL_DO
2144
#undef VMUL
2145

    
2146
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2147
{
2148
    ppc_avr_t result;
2149
    int i;
2150
    VECTOR_FOR_INORDER_I (i, u8) {
2151
        int s = c->u8[i] & 0x1f;
2152
#if defined(WORDS_BIGENDIAN)
2153
        int index = s & 0xf;
2154
#else
2155
        int index = 15 - (s & 0xf);
2156
#endif
2157
        if (s & 0x10) {
2158
            result.u8[i] = b->u8[index];
2159
        } else {
2160
            result.u8[i] = a->u8[index];
2161
        }
2162
    }
2163
    *r = result;
2164
}
2165

    
2166
#define VROTATE(suffix, element)                                        \
2167
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2168
    {                                                                   \
2169
        int i;                                                          \
2170
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2171
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2172
            unsigned int shift = b->element[i] & mask;                  \
2173
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2174
        }                                                               \
2175
    }
2176
VROTATE(b, u8)
2177
VROTATE(h, u16)
2178
VROTATE(w, u32)
2179
#undef VROTATE
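
/* In VROTATE (and in VSL/VSR below) the expression
 * (1 << (3 + (sizeof (element) >> 1))) - 1 evaluates to 0x07 for bytes,
 * 0x0f for halfwords and 0x1f for words, so the shift count taken from B is
 * reduced modulo the element width, as the architecture requires.
 */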
2180

    
2181
void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2182
{
2183
    r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
2184
    r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
2185
}
2186

    
2187
#define VSL(suffix, element)                                            \
2188
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2189
    {                                                                   \
2190
        int i;                                                          \
2191
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2192
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2193
            unsigned int shift = b->element[i] & mask;                  \
2194
            r->element[i] = a->element[i] << shift;                     \
2195
        }                                                               \
2196
    }
2197
VSL(b, u8)
2198
VSL(h, u16)
2199
VSL(w, u32)
2200
#undef VSL
2201

    
2202
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
2203
{
2204
    int sh = shift & 0xf;
2205
    int i;
2206
    ppc_avr_t result;
2207

    
2208
#if defined(WORDS_BIGENDIAN)
2209
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2210
        int index = sh + i;
2211
        if (index > 0xf) {
2212
            result.u8[i] = b->u8[index-0x10];
2213
        } else {
2214
            result.u8[i] = a->u8[index];
2215
        }
2216
    }
2217
#else
2218
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2219
        int index = (16 - sh) + i;
2220
        if (index > 0xf) {
2221
            result.u8[i] = a->u8[index-0x10];
2222
        } else {
2223
            result.u8[i] = b->u8[index];
2224
        }
2225
    }
2226
#endif
2227
    *r = result;
2228
}
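
/* vsldoi concatenates A and B and extracts 16 bytes starting at byte
 * 'shift' of the pair (big-endian byte numbering): with shift == 4 the
 * result is A[4..15] followed by B[0..3].
 */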
2229

    
2230
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2231
{
2232
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2233

    
2234
#if defined (WORDS_BIGENDIAN)
2235
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
2236
  memset (&r->u8[16-sh], 0, sh);
2237
#else
2238
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
2239
  memset (&r->u8[0], 0, sh);
2240
#endif
2241
}
2242

    
2243
/* Experimental testing shows that hardware masks the immediate.  */
2244
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2245
#if defined(WORDS_BIGENDIAN)
2246
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2247
#else
2248
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2249
#endif
2250
#define VSPLT(suffix, element)                                          \
2251
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2252
    {                                                                   \
2253
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
2254
        int i;                                                          \
2255
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2256
            r->element[i] = s;                                          \
2257
        }                                                               \
2258
    }
2259
VSPLT(b, u8)
2260
VSPLT(h, u16)
2261
VSPLT(w, u32)
2262
#undef VSPLT
2263
#undef SPLAT_ELEMENT
2264
#undef _SPLAT_MASKED
2265

    
2266
#define VSR(suffix, element)                                            \
2267
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2268
    {                                                                   \
2269
        int i;                                                          \
2270
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2271
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2272
            unsigned int shift = b->element[i] & mask;                  \
2273
            r->element[i] = a->element[i] >> shift;                     \
2274
        }                                                               \
2275
    }
2276
VSR(ab, s8)
2277
VSR(ah, s16)
2278
VSR(aw, s32)
2279
VSR(b, u8)
2280
VSR(h, u16)
2281
VSR(w, u32)
2282
#undef VSR
2283

    
2284
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2285
{
2286
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2287

    
2288
#if defined (WORDS_BIGENDIAN)
2289
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
2290
  memset (&r->u8[0], 0, sh);
2291
#else
2292
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
2293
  memset (&r->u8[16-sh], 0, sh);
2294
#endif
2295
}
2296

    
2297
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2298
{
2299
    int i;
2300
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2301
        r->u32[i] = a->u32[i] >= b->u32[i];
2302
    }
2303
}
2304

    
2305
#if defined(WORDS_BIGENDIAN)
2306
#define UPKHI 1
2307
#define UPKLO 0
2308
#else
2309
#define UPKHI 0
2310
#define UPKLO 1
2311
#endif
2312
#define VUPKPX(suffix, hi)                                      \
2313
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
2314
    {                                                           \
2315
        int i;                                                  \
2316
        ppc_avr_t result;                                       \
2317
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
2318
            uint16_t e = b->u16[hi ? i : i+4];                  \
2319
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
2320
            uint8_t r = (e >> 10) & 0x1f;                       \
2321
            uint8_t g = (e >> 5) & 0x1f;                        \
2322
            uint8_t b = e & 0x1f;                               \
2323
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
2324
        }                                                               \
2325
        *r = result;                                                    \
2326
    }
2327
VUPKPX(lpx, UPKLO)
2328
VUPKPX(hpx, UPKHI)
2329
#undef VUPKPX
2330

    
2331
#define VUPK(suffix, unpacked, packee, hi)                              \
2332
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
2333
    {                                                                   \
2334
        int i;                                                          \
2335
        ppc_avr_t result;                                               \
2336
        if (hi) {                                                       \
2337
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
2338
                result.unpacked[i] = b->packee[i];                      \
2339
            }                                                           \
2340
        } else {                                                        \
2341
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
2342
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
2343
            }                                                           \
2344
        }                                                               \
2345
        *r = result;                                                    \
2346
    }
2347
VUPK(hsb, s16, s8, UPKHI)
2348
VUPK(hsh, s32, s16, UPKHI)
2349
VUPK(lsb, s16, s8, UPKLO)
2350
VUPK(lsh, s32, s16, UPKLO)
2351
#undef VUPK
2352
#undef UPKHI
2353
#undef UPKLO
2354

    
2355
#undef VECTOR_FOR_INORDER_I
2356
#undef HI_IDX
2357
#undef LO_IDX
2358

    
2359
/*****************************************************************************/
2360
/* SPE extension helpers */
2361
/* Use a table to make this quicker */
2362
static uint8_t hbrev[16] = {
2363
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
2364
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
2365
};
2366

    
2367
static always_inline uint8_t byte_reverse (uint8_t val)
2368
{
2369
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
2370
}
2371

    
2372
static always_inline uint32_t word_reverse (uint32_t val)
2373
{
2374
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
2375
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
2376
}
2377

    
2378
#define MASKBITS 16 // Arbitrary value - to be fixed (implementation dependent)
2379
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
2380
{
2381
    uint32_t a, b, d, mask;
2382

    
2383
    mask = UINT32_MAX >> (32 - MASKBITS);
2384
    a = arg1 & mask;
2385
    b = arg2 & mask;
2386
    d = word_reverse(1 + word_reverse(a | ~b));
2387
    return (arg1 & ~mask) | (d & b);
2388
}
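
/* brinc implements the SPE bit-reversed increment used for FFT-style
 * addressing: the bits of rA selected by the rB mask are stepped in
 * bit-reversed order (reverse, add one, reverse back), while the bits of rA
 * above MASKBITS pass through unchanged.  MASKBITS bounds how many
 * low-order mask bits this implementation honours.
 */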
2389

    
2390
uint32_t helper_cntlsw32 (uint32_t val)
2391
{
2392
    if (val & 0x80000000)
2393
        return clz32(~val);
2394
    else
2395
        return clz32(val);
2396
}
2397

    
2398
uint32_t helper_cntlzw32 (uint32_t val)
2399
{
2400
    return clz32(val);
2401
}
2402

    
2403
/* Single-precision floating-point conversions */
2404
static always_inline uint32_t efscfsi (uint32_t val)
2405
{
2406
    CPU_FloatU u;
2407

    
2408
    u.f = int32_to_float32(val, &env->spe_status);
2409

    
2410
    return u.l;
2411
}
2412

    
2413
static always_inline uint32_t efscfui (uint32_t val)
2414
{
2415
    CPU_FloatU u;
2416

    
2417
    u.f = uint32_to_float32(val, &env->spe_status);
2418

    
2419
    return u.l;
2420
}
2421

    
2422
static always_inline int32_t efsctsi (uint32_t val)
2423
{
2424
    CPU_FloatU u;
2425

    
2426
    u.l = val;
2427
    /* NaNs are not treated the way IEEE 754 requires */
2428
    if (unlikely(float32_is_nan(u.f)))
2429
        return 0;
2430

    
2431
    return float32_to_int32(u.f, &env->spe_status);
2432
}
2433

    
2434
static always_inline uint32_t efsctui (uint32_t val)
2435
{
2436
    CPU_FloatU u;
2437

    
2438
    u.l = val;
2439
    /* NaNs are not treated the way IEEE 754 requires */
2440
    if (unlikely(float32_is_nan(u.f)))
2441
        return 0;
2442

    
2443
    return float32_to_uint32(u.f, &env->spe_status);
2444
}
2445

    
2446
static always_inline uint32_t efsctsiz (uint32_t val)
2447
{
2448
    CPU_FloatU u;
2449

    
2450
    u.l = val;
2451
    /* NaNs are not treated the way IEEE 754 requires */
2452
    if (unlikely(float32_is_nan(u.f)))
2453
        return 0;
2454

    
2455
    return float32_to_int32_round_to_zero(u.f, &env->spe_status);
2456
}
2457

    
2458
static always_inline uint32_t efsctuiz (uint32_t val)
2459
{
2460
    CPU_FloatU u;
2461

    
2462
    u.l = val;
2463
    /* NaNs are not treated the way IEEE 754 requires */
2464
    if (unlikely(float32_is_nan(u.f)))
2465
        return 0;
2466

    
2467
    return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
2468
}
2469

    
2470
static always_inline uint32_t efscfsf (uint32_t val)
2471
{
2472
    CPU_FloatU u;
2473
    float32 tmp;
2474

    
2475
    u.f = int32_to_float32(val, &env->spe_status);
2476
    tmp = int64_to_float32(1ULL << 32, &env->spe_status);
2477
    u.f = float32_div(u.f, tmp, &env->spe_status);
2478

    
2479
    return u.l;
2480
}
2481

    
2482
static always_inline uint32_t efscfuf (uint32_t val)
2483
{
2484
    CPU_FloatU u;
2485
    float32 tmp;
2486

    
2487
    u.f = uint32_to_float32(val, &env->spe_status);
2488
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2489
    u.f = float32_div(u.f, tmp, &env->spe_status);
2490

    
2491
    return u.l;
2492
}
2493

    
2494
static always_inline uint32_t efsctsf (uint32_t val)
2495
{
2496
    CPU_FloatU u;
2497
    float32 tmp;
2498

    
2499
    u.l = val;
2500
    /* NaNs are not treated the way IEEE 754 requires */
2501
    if (unlikely(float32_is_nan(u.f)))
2502
        return 0;
2503
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2504
    u.f = float32_mul(u.f, tmp, &env->spe_status);
2505

    
2506
    return float32_to_int32(u.f, &env->spe_status);
2507
}
2508

    
2509
static always_inline uint32_t efsctuf (uint32_t val)
2510
{
2511
    CPU_FloatU u;
2512
    float32 tmp;
2513

    
2514
    u.l = val;
2515
    /* NaNs are not treated the way IEEE 754 requires */
2516
    if (unlikely(float32_is_nan(u.f)))
2517
        return 0;
2518
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2519
    u.f = float32_mul(u.f, tmp, &env->spe_status);
2520

    
2521
    return float32_to_uint32(u.f, &env->spe_status);
2522
}
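
/* The *f conversions above handle the SPE fractional formats: efscfsf and
 * efscfuf treat the 32-bit operand as a signed/unsigned fixed-point
 * fraction and scale it by 2^-32 (the float32_div by 1ULL << 32), while
 * efsctsf and efsctuf go the other way, multiplying by 2^32 before
 * converting back to an integer.
 */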
2523

    
2524
#define HELPER_SPE_SINGLE_CONV(name)                                          \
2525
uint32_t helper_e##name (uint32_t val)                                        \
2526
{                                                                             \
2527
    return e##name(val);                                                      \
2528
}
2529
/* efscfsi */
2530
HELPER_SPE_SINGLE_CONV(fscfsi);
2531
/* efscfui */
2532
HELPER_SPE_SINGLE_CONV(fscfui);
2533
/* efscfuf */
2534
HELPER_SPE_SINGLE_CONV(fscfuf);
2535
/* efscfsf */
2536
HELPER_SPE_SINGLE_CONV(fscfsf);
2537
/* efsctsi */
2538
HELPER_SPE_SINGLE_CONV(fsctsi);
2539
/* efsctui */
2540
HELPER_SPE_SINGLE_CONV(fsctui);
2541
/* efsctsiz */
2542
HELPER_SPE_SINGLE_CONV(fsctsiz);
2543
/* efsctuiz */
2544
HELPER_SPE_SINGLE_CONV(fsctuiz);
2545
/* efsctsf */
2546
HELPER_SPE_SINGLE_CONV(fsctsf);
2547
/* efsctuf */
2548
HELPER_SPE_SINGLE_CONV(fsctuf);
2549

    
2550
#define HELPER_SPE_VECTOR_CONV(name)                                          \
2551
uint64_t helper_ev##name (uint64_t val)                                       \
2552
{                                                                             \
2553
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
2554
            (uint64_t)e##name(val);                                           \
2555
}
2556
/* evfscfsi */
2557
HELPER_SPE_VECTOR_CONV(fscfsi);
2558
/* evfscfui */
2559
HELPER_SPE_VECTOR_CONV(fscfui);
2560
/* evfscfuf */
2561
HELPER_SPE_VECTOR_CONV(fscfuf);
2562
/* evfscfsf */
2563
HELPER_SPE_VECTOR_CONV(fscfsf);
2564
/* evfsctsi */
2565
HELPER_SPE_VECTOR_CONV(fsctsi);
2566
/* evfsctui */
2567
HELPER_SPE_VECTOR_CONV(fsctui);
2568
/* evfsctsiz */
2569
HELPER_SPE_VECTOR_CONV(fsctsiz);
2570
/* evfsctuiz */
2571
HELPER_SPE_VECTOR_CONV(fsctuiz);
2572
/* evfsctsf */
2573
HELPER_SPE_VECTOR_CONV(fsctsf);
2574
/* evfsctuf */
2575
HELPER_SPE_VECTOR_CONV(fsctuf);
2576

    
2577
/* Single-precision floating-point arithmetic */
2578
static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
2579
{
2580
    CPU_FloatU u1, u2;
2581
    u1.l = op1;
2582
    u2.l = op2;
2583
    u1.f = float32_add(u1.f, u2.f, &env->spe_status);
2584
    return u1.l;
2585
}
2586

    
2587
static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
2588
{
2589
    CPU_FloatU u1, u2;
2590
    u1.l = op1;
2591
    u2.l = op2;
2592
    u1.f = float32_sub(u1.f, u2.f, &env->spe_status);
2593
    return u1.l;
2594
}
2595

    
2596
static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
2597
{
2598
    CPU_FloatU u1, u2;
2599
    u1.l = op1;
2600
    u2.l = op2;
2601
    u1.f = float32_mul(u1.f, u2.f, &env->spe_status);
2602
    return u1.l;
2603
}
2604

    
2605
static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
2606
{
2607
    CPU_FloatU u1, u2;
2608
    u1.l = op1;
2609
    u2.l = op2;
2610
    u1.f = float32_div(u1.f, u2.f, &env->spe_status);
2611
    return u1.l;
2612
}
2613

    
2614
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
2615
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
2616
{                                                                             \
2617
    return e##name(op1, op2);                                                 \
2618
}
2619
/* efsadd */
2620
HELPER_SPE_SINGLE_ARITH(fsadd);
2621
/* efssub */
2622
HELPER_SPE_SINGLE_ARITH(fssub);
2623
/* efsmul */
2624
HELPER_SPE_SINGLE_ARITH(fsmul);
2625
/* efsdiv */
2626
HELPER_SPE_SINGLE_ARITH(fsdiv);
2627

    
2628
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
2629
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
2630
{                                                                             \
2631
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
2632
            (uint64_t)e##name(op1, op2);                                      \
2633
}
2634
/* evfsadd */
2635
HELPER_SPE_VECTOR_ARITH(fsadd);
2636
/* evfssub */
2637
HELPER_SPE_VECTOR_ARITH(fssub);
2638
/* evfsmul */
2639
HELPER_SPE_VECTOR_ARITH(fsmul);
2640
/* evfsdiv */
2641
HELPER_SPE_VECTOR_ARITH(fsdiv);
2642

    
2643
/* Single-precision floating-point comparisons */
2644
static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
2645
{
2646
    CPU_FloatU u1, u2;
2647
    u1.l = op1;
2648
    u2.l = op2;
2649
    return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2650
}
2651

    
2652
static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
2653
{
2654
    CPU_FloatU u1, u2;
2655
    u1.l = op1;
2656
    u2.l = op2;
2657
    return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4;
2658
}
2659

    
2660
static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
2661
{
2662
    CPU_FloatU u1, u2;
2663
    u1.l = op1;
2664
    u2.l = op2;
2665
    return float32_eq(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2666
}
2667

    
2668
static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
2669
{
2670
    /* XXX: TODO: test special values (NaN, infinities, ...) */
2671
    return efststlt(op1, op2);
2672
}
2673

    
2674
static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
2675
{
2676
    /* XXX: TODO: test special values (NaN, infinities, ...) */
2677
    return efststgt(op1, op2);
2678
}
2679

    
2680
static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
2681
{
2682
    /* XXX: TODO: test special values (NaN, infinities, ...) */
2683
    return efststeq(op1, op2);
2684
}
2685

    
2686
#define HELPER_SINGLE_SPE_CMP(name)                                           \
2687
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
2688
{                                                                             \
2689
    return e##name(op1, op2) << 2;                                            \
2690
}
2691
/* efststlt */
2692
HELPER_SINGLE_SPE_CMP(fststlt);
2693
/* efststgt */
2694
HELPER_SINGLE_SPE_CMP(fststgt);
2695
/* efststeq */
2696
HELPER_SINGLE_SPE_CMP(fststeq);
2697
/* efscmplt */
2698
HELPER_SINGLE_SPE_CMP(fscmplt);
2699
/* efscmpgt */
2700
HELPER_SINGLE_SPE_CMP(fscmpgt);
2701
/* efscmpeq */
2702
HELPER_SINGLE_SPE_CMP(fscmpeq);
2703

    
2704
static always_inline uint32_t evcmp_merge (int t0, int t1)
2705
{
2706
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
2707
}
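
/* evcmp_merge packs the per-lane results of a vector compare into the
 * 4-bit CR field used by the ev*cmp/ev*tst instructions: bit 3 = high
 * lane, bit 2 = low lane, bit 1 = either lane, bit 0 = both lanes.
 */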
2708

    
2709
#define HELPER_VECTOR_SPE_CMP(name)                                           \
2710
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
2711
{                                                                             \
2712
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
2713
}
2714
/* evfststlt */
2715
HELPER_VECTOR_SPE_CMP(fststlt);
2716
/* evfststgt */
2717
HELPER_VECTOR_SPE_CMP(fststgt);
2718
/* evfststeq */
2719
HELPER_VECTOR_SPE_CMP(fststeq);
2720
/* evfscmplt */
2721
HELPER_VECTOR_SPE_CMP(fscmplt);
2722
/* evfscmpgt */
2723
HELPER_VECTOR_SPE_CMP(fscmpgt);
2724
/* evfscmpeq */
2725
HELPER_VECTOR_SPE_CMP(fscmpeq);
2726

    
2727
/* Double-precision floating-point conversion */
2728
uint64_t helper_efdcfsi (uint32_t val)
2729
{
2730
    CPU_DoubleU u;
2731

    
2732
    u.d = int32_to_float64(val, &env->spe_status);
2733

    
2734
    return u.ll;
2735
}
2736

    
2737
uint64_t helper_efdcfsid (uint64_t val)
2738
{
2739
    CPU_DoubleU u;
2740

    
2741
    u.d = int64_to_float64(val, &env->spe_status);
2742

    
2743
    return u.ll;
2744
}
2745

    
2746
uint64_t helper_efdcfui (uint32_t val)
2747
{
2748
    CPU_DoubleU u;
2749

    
2750
    u.d = uint32_to_float64(val, &env->spe_status);
2751

    
2752
    return u.ll;
2753
}
2754

    
2755
uint64_t helper_efdcfuid (uint64_t val)
2756
{
2757
    CPU_DoubleU u;
2758

    
2759
    u.d = uint64_to_float64(val, &env->spe_status);
2760

    
2761
    return u.ll;
2762
}
2763

    
2764
uint32_t helper_efdctsi (uint64_t val)
2765
{
2766
    CPU_DoubleU u;
2767

    
2768
    u.ll = val;
2769
    /* NaN are not treated the same way IEEE 754 does */
2770
    if (unlikely(float64_is_nan(u.d)))
2771
        return 0;
2772

    
2773
    return float64_to_int32(u.d, &env->spe_status);
2774
}
2775

    
2776
uint32_t helper_efdctui (uint64_t val)
2777
{
2778
    CPU_DoubleU u;
2779

    
2780
    u.ll = val;
2781
    /* NaNs are not treated the way IEEE 754 requires */
2782
    if (unlikely(float64_is_nan(u.d)))
2783
        return 0;
2784

    
2785
    return float64_to_uint32(u.d, &env->spe_status);
2786
}
2787

    
2788
uint32_t helper_efdctsiz (uint64_t val)
2789
{
2790
    CPU_DoubleU u;
2791

    
2792
    u.ll = val;
2793
    /* NaNs are not treated the way IEEE 754 requires */
2794
    if (unlikely(float64_is_nan(u.d)))
2795
        return 0;
2796

    
2797
    return float64_to_int32_round_to_zero(u.d, &env->spe_status);
2798
}
2799

    
2800
uint64_t helper_efdctsidz (uint64_t val)
2801
{
2802
    CPU_DoubleU u;
2803

    
2804
    u.ll = val;
2805
    /* NaNs are not treated the way IEEE 754 requires */
2806
    if (unlikely(float64_is_nan(u.d)))
2807
        return 0;
2808

    
2809
    return float64_to_int64_round_to_zero(u.d, &env->spe_status);
2810
}
2811

    
2812
uint32_t helper_efdctuiz (uint64_t val)
2813
{
2814
    CPU_DoubleU u;
2815

    
2816
    u.ll = val;
2817
    /* NaNs are not treated the way IEEE 754 requires */
2818
    if (unlikely(float64_is_nan(u.d)))
2819
        return 0;
2820

    
2821
    return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
2822
}
2823

    
2824
uint64_t helper_efdctuidz (uint64_t val)
2825
{
2826
    CPU_DoubleU u;
2827

    
2828
    u.ll = val;
2829
    /* NaNs are not treated the way IEEE 754 requires */
2830
    if (unlikely(float64_is_nan(u.d)))
2831
        return 0;
2832

    
2833
    return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
2834
}
2835

    
2836
uint64_t helper_efdcfsf (uint32_t val)
2837
{
2838
    CPU_DoubleU u;
2839
    float64 tmp;
2840

    
2841
    u.d = int32_to_float64(val, &env->spe_status);
2842
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2843
    u.d = float64_div(u.d, tmp, &env->spe_status);
2844

    
2845
    return u.ll;
2846
}
2847

    
2848
uint64_t helper_efdcfuf (uint32_t val)
2849
{
2850
    CPU_DoubleU u;
2851
    float64 tmp;
2852

    
2853
    u.d = uint32_to_float64(val, &env->spe_status);
2854
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2855
    u.d = float64_div(u.d, tmp, &env->spe_status);
2856

    
2857
    return u.ll;
2858
}
2859

    
2860
uint32_t helper_efdctsf (uint64_t val)
2861
{
2862
    CPU_DoubleU u;
2863
    float64 tmp;
2864

    
2865
    u.ll = val;
2866
    /* NaNs are not treated the way IEEE 754 requires */
2867
    if (unlikely(float64_is_nan(u.d)))
2868
        return 0;
2869
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2870
    u.d = float64_mul(u.d, tmp, &env->spe_status);
2871

    
2872
    return float64_to_int32(u.d, &env->spe_status);
2873
}
2874

    
2875
uint32_t helper_efdctuf (uint64_t val)
2876
{
2877
    CPU_DoubleU u;
2878
    float64 tmp;
2879

    
2880
    u.ll = val;
2881
    /* NaNs are not treated the way IEEE 754 requires */
2882
    if (unlikely(float64_is_nan(u.d)))
2883
        return 0;
2884
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2885
    u.d = float64_mul(u.d, tmp, &env->spe_status);
2886

    
2887
    return float64_to_uint32(u.d, &env->spe_status);
2888
}
2889

    
2890
uint32_t helper_efscfd (uint64_t val)
2891
{
2892
    CPU_DoubleU u1;
2893
    CPU_FloatU u2;
2894

    
2895
    u1.ll = val;
2896
    u2.f = float64_to_float32(u1.d, &env->spe_status);
2897

    
2898
    return u2.l;
2899
}
2900

    
2901
uint64_t helper_efdcfs (uint32_t val)
2902
{
2903
    CPU_DoubleU u2;
2904
    CPU_FloatU u1;
2905

    
2906
    u1.l = val;
2907
    u2.d = float32_to_float64(u1.f, &env->spe_status);
2908

    
2909
    return u2.ll;
2910
}
2911

    
2912
/* Double-precision floating-point arithmetic */
2913
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
2914
{
2915
    CPU_DoubleU u1, u2;
2916
    u1.ll = op1;
2917
    u2.ll = op2;
2918
    u1.d = float64_add(u1.d, u2.d, &env->spe_status);
2919
    return u1.ll;
2920
}
2921

    
2922
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
2923
{
2924
    CPU_DoubleU u1, u2;
2925
    u1.ll = op1;
2926
    u2.ll = op2;
2927
    u1.d = float64_sub(u1.d, u2.d, &env->spe_status);
2928
    return u1.ll;
2929
}
2930

    
2931
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
2932
{
2933
    CPU_DoubleU u1, u2;
2934
    u1.ll = op1;
2935
    u2.ll = op2;
2936
    u1.d = float64_mul(u1.d, u2.d, &env->spe_status);
2937
    return u1.ll;
2938
}
2939

    
2940
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
2941
{
2942
    CPU_DoubleU u1, u2;
2943
    u1.ll = op1;
2944
    u2.ll = op2;
2945
    u1.d = float64_div(u1.d, u2.d, &env->spe_status);
2946
    return u1.ll;
2947
}
2948

    
2949
/* Double-precision floating-point comparisons */
2950
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
2951
{
2952
    CPU_DoubleU u1, u2;
2953
    u1.ll = op1;
2954
    u2.ll = op2;
2955
    return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0;
2956
}
2957

    
2958
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
2959
{
2960
    CPU_DoubleU u1, u2;
2961
    u1.ll = op1;
2962
    u2.ll = op2;
2963
    return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4;
2964
}
2965

    
2966
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
2967
{
2968
    CPU_DoubleU u1, u2;
2969
    u1.ll = op1;
2970
    u2.ll = op2;
2971
    return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0;
2972
}
2973

    
2974
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
2975
{
2976
    /* XXX: TODO: test special values (NaN, infinities, ...) */
2977
    return helper_efdtstlt(op1, op2);
2978
}
2979

    
2980
uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
2981
{
2982
    /* XXX: TODO: test special values (NaN, infinities, ...) */
2983
    return helper_efdtstgt(op1, op2);
2984
}
2985

    
2986
uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
2987
{
2988
    /* XXX: TODO: test special values (NaN, infinities, ...) */
2989
    return helper_efdtsteq(op1, op2);
2990
}
2991

    
2992
/*****************************************************************************/
2993
/* Softmmu support */
2994
#if !defined (CONFIG_USER_ONLY)
2995

    
2996
#define MMUSUFFIX _mmu
2997

    
2998
#define SHIFT 0
2999
#include "softmmu_template.h"
3000

    
3001
#define SHIFT 1
3002
#include "softmmu_template.h"
3003

    
3004
#define SHIFT 2
3005
#include "softmmu_template.h"
3006

    
3007
#define SHIFT 3
3008
#include "softmmu_template.h"
3009

    
3010
/* Try to fill the TLB and raise an exception on error. If retaddr is
3011
   NULL, it means that the function was called from C code (i.e. not
3012
   from generated code or from helper.c) */
3013
/* XXX: fix it to restore all registers */
3014
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
3015
{
3016
    TranslationBlock *tb;
3017
    CPUState *saved_env;
3018
    unsigned long pc;
3019
    int ret;
3020

    
3021
    /* XXX: hack to restore env in all cases, even if not called from
3022
       generated code */
3023
    saved_env = env;
3024
    env = cpu_single_env;
3025
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3026
    if (unlikely(ret != 0)) {
3027
        if (likely(retaddr)) {
3028
            /* now we have a real cpu fault */
3029
            pc = (unsigned long)retaddr;
3030
            tb = tb_find_pc(pc);
3031
            if (likely(tb)) {
3032
                /* the PC is inside the translated code. It means that we have
3033
                   a virtual CPU fault */
3034
                cpu_restore_state(tb, env, pc, NULL);
3035
            }
3036
        }
3037
        helper_raise_exception_err(env->exception_index, env->error_code);
3038
    }
3039
    env = saved_env;
3040
}
3041

    
3042
/* Segment registers load and store */
3043
target_ulong helper_load_sr (target_ulong sr_num)
3044
{
3045
    return env->sr[sr_num];
3046
}
3047

    
3048
void helper_store_sr (target_ulong sr_num, target_ulong val)
3049
{
3050
    ppc_store_sr(env, sr_num, val);
3051
}
3052

    
3053
/* SLB management */
3054
#if defined(TARGET_PPC64)
3055
target_ulong helper_load_slb (target_ulong slb_nr)
3056
{
3057
    return ppc_load_slb(env, slb_nr);
3058
}
3059

    
3060
void helper_store_slb (target_ulong slb_nr, target_ulong rs)
3061
{
3062
    ppc_store_slb(env, slb_nr, rs);
3063
}
3064

    
3065
void helper_slbia (void)
3066
{
3067
    ppc_slb_invalidate_all(env);
3068
}
3069

    
3070
void helper_slbie (target_ulong addr)
3071
{
3072
    ppc_slb_invalidate_one(env, addr);
3073
}
3074

    
3075
#endif /* defined(TARGET_PPC64) */
3076

    
3077
/* TLB management */
3078
void helper_tlbia (void)
3079
{
3080
    ppc_tlb_invalidate_all(env);
3081
}
3082

    
3083
void helper_tlbie (target_ulong addr)
3084
{
3085
    ppc_tlb_invalidate_one(env, addr);
3086
}
3087

    
3088
/* Software-driven TLB management */
3089
/* PowerPC 602/603 software TLB load instruction helpers */
3090
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
3091
{
3092
    target_ulong RPN, CMP, EPN;
3093
    int way;
3094

    
3095
    RPN = env->spr[SPR_RPA];
3096
    if (is_code) {
3097
        CMP = env->spr[SPR_ICMP];
3098
        EPN = env->spr[SPR_IMISS];
3099
    } else {
3100
        CMP = env->spr[SPR_DCMP];
3101
        EPN = env->spr[SPR_DMISS];
3102
    }
3103
    way = (env->spr[SPR_SRR1] >> 17) & 1;
3104
#if defined (DEBUG_SOFTWARE_TLB)
3105
    if (loglevel != 0) {
3106
        fprintf(logfile, "%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3107
                " PTE1 " ADDRX " way %d\n",
3108
                __func__, new_EPN, EPN, CMP, RPN, way);
3109
    }
3110
#endif
3111
    /* Store this TLB */
3112
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3113
                     way, is_code, CMP, RPN);
3114
}
3115

    
3116
void helper_6xx_tlbd (target_ulong EPN)
3117
{
3118
    do_6xx_tlb(EPN, 0);
3119
}
3120

    
3121
void helper_6xx_tlbi (target_ulong EPN)
3122
{
3123
    do_6xx_tlb(EPN, 1);
3124
}
3125

    
3126
/* PowerPC 74xx software TLB load instruction helpers */
3127
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
3128
{
3129
    target_ulong RPN, CMP, EPN;
3130
    int way;
3131

    
3132
    RPN = env->spr[SPR_PTELO];
3133
    CMP = env->spr[SPR_PTEHI];
3134
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
3135
    way = env->spr[SPR_TLBMISS] & 0x3;
3136
#if defined (DEBUG_SOFTWARE_TLB)
3137
    if (loglevel != 0) {
3138
        fprintf(logfile, "%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3139
                " PTE1 " ADDRX " way %d\n",
3140
                __func__, new_EPN, EPN, CMP, RPN, way);
3141
    }
3142
#endif
3143
    /* Store this TLB */
3144
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3145
                     way, is_code, CMP, RPN);
3146
}
3147

    
3148
void helper_74xx_tlbd (target_ulong EPN)
3149
{
3150
    do_74xx_tlb(EPN, 0);
3151
}
3152

    
3153
void helper_74xx_tlbi (target_ulong EPN)
3154
{
3155
    do_74xx_tlb(EPN, 1);
3156
}
3157

    
3158
static always_inline target_ulong booke_tlb_to_page_size (int size)
3159
{
3160
    return 1024 << (2 * size);
3161
}
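
/* The BookE TSIZE field encodes the page size as 1 KB * 4^TSIZE, which is
 * what the shift above computes (0 -> 1 KB, 1 -> 4 KB, ..., 9 -> 256 MB);
 * booke_page_size_to_tlb() below is the inverse mapping.
 */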
3162

    
3163
static always_inline int booke_page_size_to_tlb (target_ulong page_size)
3164
{
3165
    int size;
3166

    
3167
    switch (page_size) {
3168
    case 0x00000400UL:
3169
        size = 0x0;
3170
        break;
3171
    case 0x00001000UL:
3172
        size = 0x1;
3173
        break;
3174
    case 0x00004000UL:
3175
        size = 0x2;
3176
        break;
3177
    case 0x00010000UL:
3178
        size = 0x3;
3179
        break;
3180
    case 0x00040000UL:
3181
        size = 0x4;
3182
        break;
3183
    case 0x00100000UL:
3184
        size = 0x5;
3185
        break;
3186
    case 0x00400000UL:
3187
        size = 0x6;
3188
        break;
3189
    case 0x01000000UL:
3190
        size = 0x7;
3191
        break;
3192
    case 0x04000000UL:
3193
        size = 0x8;
3194
        break;
3195
    case 0x10000000UL:
3196
        size = 0x9;
3197
        break;
3198
    case 0x40000000UL:
3199
        size = 0xA;
3200
        break;
3201
#if defined (TARGET_PPC64)
3202
    case 0x000100000000ULL:
3203
        size = 0xB;
3204
        break;
3205
    case 0x000400000000ULL:
3206
        size = 0xC;
3207
        break;
3208
    case 0x001000000000ULL:
3209
        size = 0xD;
3210
        break;
3211
    case 0x004000000000ULL:
3212
        size = 0xE;
3213
        break;
3214
    case 0x010000000000ULL:
3215
        size = 0xF;
3216
        break;
3217
#endif
3218
    default:
3219
        size = -1;
3220
        break;
3221
    }
3222

    
3223
    return size;
3224
}
3225

    
3226
/* Helpers for 4xx TLB management */
3227
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
3228
{
3229
    ppcemb_tlb_t *tlb;
3230
    target_ulong ret;
3231
    int size;
3232

    
3233
    entry &= 0x3F;
3234
    tlb = &env->tlb[entry].tlbe;
3235
    ret = tlb->EPN;
3236
    if (tlb->prot & PAGE_VALID)
3237
        ret |= 0x400;
3238
    size = booke_page_size_to_tlb(tlb->size);
3239
    if (size < 0 || size > 0x7)
3240
        size = 1;
3241
    ret |= size << 7;
3242
    env->spr[SPR_40x_PID] = tlb->PID;
3243
    return ret;
3244
}
3245

    
3246
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
3247
{
3248
    ppcemb_tlb_t *tlb;
3249
    target_ulong ret;
3250

    
3251
    entry &= 0x3F;
3252
    tlb = &env->tlb[entry].tlbe;
3253
    ret = tlb->RPN;
3254
    if (tlb->prot & PAGE_EXEC)
3255
        ret |= 0x200;
3256
    if (tlb->prot & PAGE_WRITE)
3257
        ret |= 0x100;
3258
    return ret;
3259
}
3260

    
3261
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
3262
{
3263
    ppcemb_tlb_t *tlb;
3264
    target_ulong page, end;
3265

    
3266
#if defined (DEBUG_SOFTWARE_TLB)
3267
    if (loglevel != 0) {
3268
        fprintf(logfile, "%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
3269
    }
3270
#endif
3271
    entry &= 0x3F;
3272
    tlb = &env->tlb[entry].tlbe;
3273
    /* Invalidate previous TLB (if it's valid) */
3274
    if (tlb->prot & PAGE_VALID) {
3275
        end = tlb->EPN + tlb->size;
3276
#if defined (DEBUG_SOFTWARE_TLB)
3277
        if (loglevel != 0) {
3278
            fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
3279
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3280
        }
3281
#endif
3282
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3283
            tlb_flush_page(env, page);
3284
    }
3285
    tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
3286
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
3287
     * If this ever occurs, one should use the ppcemb target instead
3288
     * of the ppc or ppc64 one
3289
     */
3290
    if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
3291
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
3292
                  "are not supported (%d)\n",
3293
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
3294
    }
3295
    tlb->EPN = val & ~(tlb->size - 1);
3296
    if (val & 0x40)
3297
        tlb->prot |= PAGE_VALID;
3298
    else
3299
        tlb->prot &= ~PAGE_VALID;
3300
    if (val & 0x20) {
3301
        /* XXX: TO BE FIXED */
3302
        cpu_abort(env, "Little-endian TLB entries are not supported by now\n");
3303
    }
3304
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
3305
    tlb->attr = val & 0xFF;
3306
#if defined (DEBUG_SOFTWARE_TLB)
3307
    if (loglevel != 0) {
3308
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
3309
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
3310
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
3311
                tlb->prot & PAGE_READ ? 'r' : '-',
3312
                tlb->prot & PAGE_WRITE ? 'w' : '-',
3313
                tlb->prot & PAGE_EXEC ? 'x' : '-',
3314
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
3315
    }
3316
#endif
3317
    /* Invalidate new TLB (if valid) */
3318
    if (tlb->prot & PAGE_VALID) {
3319
        end = tlb->EPN + tlb->size;
3320
#if defined (DEBUG_SOFTWARE_TLB)
3321
        if (loglevel != 0) {
3322
            fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
3323
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3324
        }
3325
#endif
3326
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3327
            tlb_flush_page(env, page);
3328
    }
3329
}
3330

    
3331
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
    }
#endif
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    tlb->RPN = val & 0xFFFFFC00;
    tlb->prot = PAGE_READ;
    if (val & 0x200)
        tlb->prot |= PAGE_EXEC;
    if (val & 0x100)
        tlb->prot |= PAGE_WRITE;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    }
#endif
}

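/* tlbsx: look up the entry matching 'address' under the current 40x PID.
 * ppcemb_tlb_search is expected to return the index of the matching entry,
 * or a negative value when no entry matches.
 */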
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}

/* PowerPC 440 TLB management */
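/* helper_440_tlbwe updates one of the three TLB words of the selected
 * entry (a summary of what the code below does): word 0 carries the EPN,
 * the page size code in bits [7:4], one attribute bit (0x100) and the
 * valid bit (0x200), and also latches the PID from the low byte of MMUCR;
 * word 1 carries the RPN; word 2 carries the remaining attribute bits and
 * two read/write/execute permission sets, one kept in the low nibble of
 * tlb->prot and one shifted left by 4.  The QEMU TLB is flushed whenever
 * a change could leave stale translations behind.
 */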
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s word %d entry %d value " ADDRX "\n",
                __func__, word, (int)entry, value);
    }
#endif
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}

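/* helper_440_tlbre is the read-back counterpart of helper_440_tlbwe and
 * reassembles the same word encodings from the stored entry.  Reading
 * word 0 also copies the entry's PID into the low byte of MMUCR,
 * presumably mirroring the hardware's update of the MMUCR TID field on
 * tlbre.
 */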
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}

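/* As for the 40x variant, tlbsx searches the software TLB, here using the
 * PID taken from the low byte of MMUCR; a negative return value from
 * ppcemb_tlb_search is expected to indicate that no entry matches.
 */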
target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

#endif /* !CONFIG_USER_ONLY */