Statistics
| Branch: | Revision:

root / target-ppc / op_helper.c @ bb4ea393

History | View | Annotate | Download (124.7 kB)

1
/*
2
 *  PowerPC emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2003-2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <string.h>
20
#include "exec.h"
21
#include "host-utils.h"
22
#include "helper.h"
23

    
24
#include "helper_regs.h"
25

    
26
//#define DEBUG_OP
27
//#define DEBUG_EXCEPTIONS
28
//#define DEBUG_SOFTWARE_TLB
29

    
30
#ifdef DEBUG_SOFTWARE_TLB
31
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
32
#else
33
#  define LOG_SWTLB(...) do { } while (0)
34
#endif
35

    
36

    
37
/*****************************************************************************/
38
/* Exceptions processing helpers */
39

    
40
/* Record a PowerPC exception in the CPU state and long-jump back to
 * the main execution loop. Does not return. */
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

/* Convenience wrapper: raise an exception with a zero error code. */
void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
54

    
55
/*****************************************************************************/
56
/* SPR accesses */
57
/* Debug helper: log an SPR read */
void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

/* Debug helper: log an SPR write */
void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}
68

    
69
target_ulong helper_load_tbl (void)
70
{
71
    return (target_ulong)cpu_ppc_load_tbl(env);
72
}
73

    
74
target_ulong helper_load_tbu (void)
75
{
76
    return cpu_ppc_load_tbu(env);
77
}
78

    
79
target_ulong helper_load_atbl (void)
80
{
81
    return (target_ulong)cpu_ppc_load_atbl(env);
82
}
83

    
84
target_ulong helper_load_atbu (void)
85
{
86
    return cpu_ppc_load_atbu(env);
87
}
88

    
89
target_ulong helper_load_601_rtcl (void)
90
{
91
    return cpu_ppc601_load_rtcl(env);
92
}
93

    
94
target_ulong helper_load_601_rtcu (void)
95
{
96
    return cpu_ppc601_load_rtcu(env);
97
}
98

    
99
#if !defined(CONFIG_USER_ONLY)
100
#if defined (TARGET_PPC64)
101
/* Address space register (64-bit only) write */
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
105
#endif
106

    
107
void helper_store_sdr1 (target_ulong val)
108
{
109
    ppc_store_sdr1(env, val);
110
}
111

    
112
void helper_store_tbl (target_ulong val)
113
{
114
    cpu_ppc_store_tbl(env, val);
115
}
116

    
117
void helper_store_tbu (target_ulong val)
118
{
119
    cpu_ppc_store_tbu(env, val);
120
}
121

    
122
void helper_store_atbl (target_ulong val)
123
{
124
    cpu_ppc_store_atbl(env, val);
125
}
126

    
127
void helper_store_atbu (target_ulong val)
128
{
129
    cpu_ppc_store_atbu(env, val);
130
}
131

    
132
void helper_store_601_rtcl (target_ulong val)
133
{
134
    cpu_ppc601_store_rtcl(env, val);
135
}
136

    
137
void helper_store_601_rtcu (target_ulong val)
138
{
139
    cpu_ppc601_store_rtcu(env, val);
140
}
141

    
142
target_ulong helper_load_decr (void)
143
{
144
    return cpu_ppc_load_decr(env);
145
}
146

    
147
void helper_store_decr (target_ulong val)
148
{
149
    cpu_ppc_store_decr(env, val);
150
}
151

    
152
void helper_store_hid0_601 (target_ulong val)
153
{
154
    target_ulong hid0;
155

    
156
    hid0 = env->spr[SPR_HID0];
157
    if ((val ^ hid0) & 0x00000008) {
158
        /* Change current endianness */
159
        env->hflags &= ~(1 << MSR_LE);
160
        env->hflags_nmsr &= ~(1 << MSR_LE);
161
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
162
        env->hflags |= env->hflags_nmsr;
163
        qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
164
                 val & 0x8 ? 'l' : 'b', env->hflags);
165
    }
166
    env->spr[SPR_HID0] = (uint32_t)val;
167
}
168

    
169
void helper_store_403_pbr (uint32_t num, target_ulong value)
170
{
171
    if (likely(env->pb[num] != value)) {
172
        env->pb[num] = value;
173
        /* Should be optimized */
174
        tlb_flush(env, 1);
175
    }
176
}
177

    
178
target_ulong helper_load_40x_pit (void)
179
{
180
    return load_40x_pit(env);
181
}
182

    
183
void helper_store_40x_pit (target_ulong val)
184
{
185
    store_40x_pit(env, val);
186
}
187

    
188
void helper_store_40x_dbcr0 (target_ulong val)
189
{
190
    store_40x_dbcr0(env, val);
191
}
192

    
193
void helper_store_40x_sler (target_ulong val)
194
{
195
    store_40x_sler(env, val);
196
}
197

    
198
void helper_store_booke_tcr (target_ulong val)
199
{
200
    store_booke_tcr(env, val);
201
}
202

    
203
void helper_store_booke_tsr (target_ulong val)
204
{
205
    store_booke_tsr(env, val);
206
}
207

    
208
void helper_store_ibatu (uint32_t nr, target_ulong val)
209
{
210
    ppc_store_ibatu(env, nr, val);
211
}
212

    
213
void helper_store_ibatl (uint32_t nr, target_ulong val)
214
{
215
    ppc_store_ibatl(env, nr, val);
216
}
217

    
218
void helper_store_dbatu (uint32_t nr, target_ulong val)
219
{
220
    ppc_store_dbatu(env, nr, val);
221
}
222

    
223
void helper_store_dbatl (uint32_t nr, target_ulong val)
224
{
225
    ppc_store_dbatl(env, nr, val);
226
}
227

    
228
void helper_store_601_batl (uint32_t nr, target_ulong val)
229
{
230
    ppc_store_ibatl_601(env, nr, val);
231
}
232

    
233
void helper_store_601_batu (uint32_t nr, target_ulong val)
234
{
235
    ppc_store_ibatu_601(env, nr, val);
236
}
237
#endif
238

    
239
/*****************************************************************************/
240
/* Memory load and stores */
241

    
242
/* Add an offset to an effective address, wrapping to 32 bits when the
 * CPU is not in 64-bit (SF) mode. */
static inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf) {
        return (uint32_t)(addr + arg);
    }
#endif
    return addr + arg;
}
251

    
252
/* Load multiple word: fill GPRs reg..31 from consecutive words,
 * byte-swapping when running little-endian. */
void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le) {
            env->gpr[reg] = bswap32(ldl(addr));
        } else {
            env->gpr[reg] = ldl(addr);
        }
        addr = addr_add(addr, 4);
    }
}

/* Store multiple word: write GPRs reg..31 to consecutive words. */
void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le) {
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        } else {
            stl(addr, (uint32_t)env->gpr[reg]);
        }
        addr = addr_add(addr, 4);
    }
}
273

    
274
/* Load string word: copy nb bytes from memory into GPRs starting at
 * reg (wrapping 31 -> 0); trailing bytes are packed MSB-first and the
 * rest of the final register is zeroed. */
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int shift;

    /* Whole 32-bit words first */
    while (nb > 3) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    /* Then up to three trailing bytes, most-significant first */
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (shift = 24; nb > 0; nb--, shift -= 8) {
            env->gpr[reg] |= ldub(addr) << shift;
            addr = addr_add(addr, 1);
        }
    }
}
290
/* PPC32 specification says we must generate an exception if
291
 * rA is in the range of registers to be loaded.
292
 * On the other hand, IBM says this is valid, but rA won't be loaded.
293
 * For now, I'll follow the spec...
294
 */
295
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        /* Per the PPC32 spec, raise a program exception when the range
         * of registers to be loaded overlaps rA (if rA != 0) or rB. */
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
308

    
309
/* Store string word: write nb bytes from GPRs starting at reg
 * (wrapping 31 -> 0); trailing bytes come from the register's most
 * significant end first. */
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int shift;

    /* Whole 32-bit words first */
    while (nb > 3) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    /* Then up to three trailing bytes, most-significant first */
    if (unlikely(nb > 0)) {
        for (shift = 24; nb > 0; nb--, shift -= 8) {
            stb(addr, (env->gpr[reg] >> shift) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}
324

    
325
/* Zero one data cache line (32-bit stores of 0) and drop any
 * load-reserve reservation that covered it. */
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        stl(addr + i, 0);
    }
    if (env->reserve_addr == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }
}

/* dcbz using the CPU's configured cache line size */
void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

/* 970 variant: HID5 bits 7..8 == 1 selects a 32-byte dcbz */
void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        do_dcbz(addr, 32);
    } else {
        do_dcbz(addr, env->dcache_line_size);
    }
}
348

    
349
/* Instruction cache block invalidate */
void helper_icbi(target_ulong addr)
{
    /* NOTE(review): the alignment mask uses dcache_line_size while the
     * invalidated range uses icache_line_size -- confirm both are
     * intended (they are typically equal). */
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}
360

    
361
// XXX: to be tested
362
/* Load string and compare byte indexed (POWER/601): load up to xer_bc
 * bytes, packing them MSB-first into successive GPRs (wrapping at 31),
 * stopping early when a byte equals xer_cmp. Returns the iteration
 * count at which the loop stopped. */
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            /* Register full: advance to the next one */
            d = 24;
            reg = (reg + 1) & 0x1F;
        }
    }
    return i;
}
385

    
386
/*****************************************************************************/
387
/* Fixed point operations helpers */
388
#if defined(TARGET_PPC64)
389

    
390
/* multiply high word */
391
/* multiply high word: upper 64 bits of the signed 64x64 product */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t lo, hi;

    muls64(&lo, &hi, arg1, arg2);
    return hi;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t lo, hi;

    mulu64(&lo, &hi, arg1, arg2);
    return hi;
}
407

    
408
/* mulld with overflow detection: sets XER[OV]/[SO] when the signed
 * 64x64 product does not fit in 64 bits. */
uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    uint64_t lo, hi;

    muls64(&lo, &hi, arg1, arg2);
    /* If hi != 0 && hi != -1, then we had an overflow; hi + 1 <= 1
     * (unsigned) is true exactly for hi in {0, -1}. */
    if (likely(hi + 1 <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)lo;
}
422
#endif
423

    
424
/* Count leading zeros, 32-bit */
target_ulong helper_cntlzw (target_ulong val)
{
    return clz32(val);
}

#if defined(TARGET_PPC64)
/* Count leading zeros, 64-bit */
target_ulong helper_cntlzd (target_ulong val)
{
    return clz64(val);
}
434
#endif
435

    
436
/* shift right arithmetic helper */
437
/* Shift right algebraic word: 32-bit arithmetic shift right, setting
 * XER[CA] when a negative value shifts out nonzero bits. */
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            /* Build the shifted-out-bits mask with 1U: "1 << 31" is
             * signed-overflow undefined behavior in C. */
            if (likely(ret >= 0 || (value & ((1U << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        /* Shift amount >= 32: result is the sign bit replicated,
         * carry set iff the value was negative */
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}
464

    
465
#if defined(TARGET_PPC64)
466
/* Shift right algebraic doubleword: 64-bit arithmetic shift right,
 * setting XER[CA] when a negative value shifts out nonzero bits. */
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* Bug fix: the mask must be built from a 64-bit one.
             * "1 << shift" shifts an int and is undefined for
             * shift >= 32, so the carry was computed incorrectly for
             * large shift amounts. */
            if (likely(ret >= 0 ||
                       (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        /* Shift amount >= 64: result is the sign bit replicated,
         * carry set iff the value was negative */
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
493
#endif
494

    
495
/* popcntb, 32-bit: count the set bits within each byte in parallel
 * (classic SWAR reduction; counts stay confined to their byte). */
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
/* popcntb, 64-bit variant of the same per-byte SWAR reduction */
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
511
#endif
512

    
513
/*****************************************************************************/
514
/* Floating point operations helpers */
515
/* Widen a single-precision bit pattern to a double-precision one via
 * softfloat, honoring the CPU's FP status. */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

/* Narrow a double-precision bit pattern to single precision. */
uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}
532

    
533
/* Return non-zero when d has a biased exponent of zero, i.e. it is
 * denormalized (also true for +/-0; callers test for zero first). */
static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;
    return ((u.ll >> 52) & 0x7FF) == 0;
}
541

    
542
/* Compute the 5-bit FPRF classification code for arg, optionally
 * storing it into FPSCR[FPRF]; returns the low 4 bits (FPCC), which
 * is all that is needed to update Rc1. */
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;

    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_any_nan(farg.d))) {
        /* Signaling NaN: flags are undefined; quiet NaN: 0x11 */
        ret = float64_is_signaling_nan(farg.d) ? 0x00 : 0x11;
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        ret = isneg ? 0x09 : 0x05;
    } else if (float64_is_zero(farg.d)) {
        /* +/- zero */
        ret = isneg ? 0x12 : 0x02;
    } else {
        /* Finite nonzero: denormalized (0x10) or normalized (0x00),
         * combined with the sign indicator */
        ret = isden(farg.d) ? 0x10 : 0x00;
        ret |= isneg ? 0x08 : 0x04;
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
593

    
594
/* Floating-point invalid operations exception */
595
static inline uint64_t fload_invalid_op_excp(int op)
596
{
597
    uint64_t ret = 0;
598
    int ve;
599

    
600
    ve = fpscr_ve;
601
    switch (op) {
602
    case POWERPC_EXCP_FP_VXSNAN:
603
        env->fpscr |= 1 << FPSCR_VXSNAN;
604
        break;
605
    case POWERPC_EXCP_FP_VXSOFT:
606
        env->fpscr |= 1 << FPSCR_VXSOFT;
607
        break;
608
    case POWERPC_EXCP_FP_VXISI:
609
        /* Magnitude subtraction of infinities */
610
        env->fpscr |= 1 << FPSCR_VXISI;
611
        goto update_arith;
612
    case POWERPC_EXCP_FP_VXIDI:
613
        /* Division of infinity by infinity */
614
        env->fpscr |= 1 << FPSCR_VXIDI;
615
        goto update_arith;
616
    case POWERPC_EXCP_FP_VXZDZ:
617
        /* Division of zero by zero */
618
        env->fpscr |= 1 << FPSCR_VXZDZ;
619
        goto update_arith;
620
    case POWERPC_EXCP_FP_VXIMZ:
621
        /* Multiplication of zero by infinity */
622
        env->fpscr |= 1 << FPSCR_VXIMZ;
623
        goto update_arith;
624
    case POWERPC_EXCP_FP_VXVC:
625
        /* Ordered comparison of NaN */
626
        env->fpscr |= 1 << FPSCR_VXVC;
627
        env->fpscr &= ~(0xF << FPSCR_FPCC);
628
        env->fpscr |= 0x11 << FPSCR_FPCC;
629
        /* We must update the target FPR before raising the exception */
630
        if (ve != 0) {
631
            env->exception_index = POWERPC_EXCP_PROGRAM;
632
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
633
            /* Update the floating-point enabled exception summary */
634
            env->fpscr |= 1 << FPSCR_FEX;
635
            /* Exception is differed */
636
            ve = 0;
637
        }
638
        break;
639
    case POWERPC_EXCP_FP_VXSQRT:
640
        /* Square root of a negative number */
641
        env->fpscr |= 1 << FPSCR_VXSQRT;
642
    update_arith:
643
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
644
        if (ve == 0) {
645
            /* Set the result to quiet NaN */
646
            ret = 0x7FF8000000000000ULL;
647
            env->fpscr &= ~(0xF << FPSCR_FPCC);
648
            env->fpscr |= 0x11 << FPSCR_FPCC;
649
        }
650
        break;
651
    case POWERPC_EXCP_FP_VXCVI:
652
        /* Invalid conversion */
653
        env->fpscr |= 1 << FPSCR_VXCVI;
654
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
655
        if (ve == 0) {
656
            /* Set the result to quiet NaN */
657
            ret = 0x7FF8000000000000ULL;
658
            env->fpscr &= ~(0xF << FPSCR_FPCC);
659
            env->fpscr |= 0x11 << FPSCR_FPCC;
660
        }
661
        break;
662
    }
663
    /* Update the floating-point invalid operation summary */
664
    env->fpscr |= 1 << FPSCR_VX;
665
    /* Update the floating-point exception summary */
666
    env->fpscr |= 1 << FPSCR_FX;
667
    if (ve != 0) {
668
        /* Update the floating-point enabled exception summary */
669
        env->fpscr |= 1 << FPSCR_FEX;
670
        if (msr_fe0 != 0 || msr_fe1 != 0)
671
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
672
    }
673
    return ret;
674
}
675

    
676
static inline void float_zero_divide_excp(void)
677
{
678
    env->fpscr |= 1 << FPSCR_ZX;
679
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
680
    /* Update the floating-point exception summary */
681
    env->fpscr |= 1 << FPSCR_FX;
682
    if (fpscr_ze != 0) {
683
        /* Update the floating-point enabled exception summary */
684
        env->fpscr |= 1 << FPSCR_FEX;
685
        if (msr_fe0 != 0 || msr_fe1 != 0) {
686
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
687
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
688
        }
689
    }
690
}
691

    
692
static inline void float_overflow_excp(void)
693
{
694
    env->fpscr |= 1 << FPSCR_OX;
695
    /* Update the floating-point exception summary */
696
    env->fpscr |= 1 << FPSCR_FX;
697
    if (fpscr_oe != 0) {
698
        /* XXX: should adjust the result */
699
        /* Update the floating-point enabled exception summary */
700
        env->fpscr |= 1 << FPSCR_FEX;
701
        /* We must update the target FPR before raising the exception */
702
        env->exception_index = POWERPC_EXCP_PROGRAM;
703
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
704
    } else {
705
        env->fpscr |= 1 << FPSCR_XX;
706
        env->fpscr |= 1 << FPSCR_FI;
707
    }
708
}
709

    
710
static inline void float_underflow_excp(void)
711
{
712
    env->fpscr |= 1 << FPSCR_UX;
713
    /* Update the floating-point exception summary */
714
    env->fpscr |= 1 << FPSCR_FX;
715
    if (fpscr_ue != 0) {
716
        /* XXX: should adjust the result */
717
        /* Update the floating-point enabled exception summary */
718
        env->fpscr |= 1 << FPSCR_FEX;
719
        /* We must update the target FPR before raising the exception */
720
        env->exception_index = POWERPC_EXCP_PROGRAM;
721
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
722
    }
723
}
724

    
725
static inline void float_inexact_excp(void)
726
{
727
    env->fpscr |= 1 << FPSCR_XX;
728
    /* Update the floating-point exception summary */
729
    env->fpscr |= 1 << FPSCR_FX;
730
    if (fpscr_xe != 0) {
731
        /* Update the floating-point enabled exception summary */
732
        env->fpscr |= 1 << FPSCR_FEX;
733
        /* We must update the target FPR before raising the exception */
734
        env->exception_index = POWERPC_EXCP_PROGRAM;
735
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
736
    }
737
}
738

    
739
static inline void fpscr_set_rounding_mode(void)
740
{
741
    int rnd_type;
742

    
743
    /* Set rounding mode */
744
    switch (fpscr_rn) {
745
    case 0:
746
        /* Best approximation (round to nearest) */
747
        rnd_type = float_round_nearest_even;
748
        break;
749
    case 1:
750
        /* Smaller magnitude (round toward zero) */
751
        rnd_type = float_round_to_zero;
752
        break;
753
    case 2:
754
        /* Round toward +infinite */
755
        rnd_type = float_round_up;
756
        break;
757
    default:
758
    case 3:
759
        /* Round toward -infinite */
760
        rnd_type = float_round_down;
761
        break;
762
    }
763
    set_float_rounding_mode(rnd_type, &env->fp_status);
764
}
765

    
766
/* Clear an FPSCR bit; when a rounding-mode bit transitions 1 -> 0,
 * the softfloat rounding mode must be recomputed. */
void helper_fpscr_clrbit (uint32_t bit)
{
    int prev = (env->fpscr >> bit) & 1;

    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}
783

    
784
/* Set an FPSCR bit; on a 0 -> 1 transition, propagate the side
 * effects: summary bits (FX/VX/FEX), deferred program exceptions for
 * newly-enabled or newly-set exception conditions, and rounding-mode
 * resynchronization. */
void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            /* Bug fix: previously fell through into the FPSCR_OX case,
             * which could raise a spurious overflow exception when
             * fpscr_oe was set */
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            /* Any invalid-operation condition also sets VX and FX */
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                /* Collect all pending invalid-operation causes */
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
898

    
899
/* Store to FPSCR under a nibble mask; only the 32 LSB of the incoming
 * FPR value are used.  The VX and FEX summary bits are recomputed
 * rather than copied, and the rounding mode is resynchronized. */
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    /* FEX and VX (0x60000000) are summary bits: keep the old values */
    new = (new & ~0x60000000) | (prev & 0x60000000);
    /* Copy each 4-bit field selected by the mask */
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Recompute VX from the individual invalid-operation bits */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    /* Recompute FEX and defer a program exception if any enabled
     * exception condition is now set */
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode();
}
932

    
933
void helper_float_check_status (void)
934
{
935
#ifdef CONFIG_SOFTFLOAT
936
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
937
        (env->error_code & POWERPC_EXCP_FP)) {
938
        /* Differred floating-point exception after target FPR update */
939
        if (msr_fe0 != 0 || msr_fe1 != 0)
940
            helper_raise_exception_err(env->exception_index, env->error_code);
941
    } else {
942
        int status = get_float_exception_flags(&env->fp_status);
943
        if (status & float_flag_divbyzero) {
944
            float_zero_divide_excp();
945
        } else if (status & float_flag_overflow) {
946
            float_overflow_excp();
947
        } else if (status & float_flag_underflow) {
948
            float_underflow_excp();
949
        } else if (status & float_flag_inexact) {
950
            float_inexact_excp();
951
        }
952
    }
953
#else
954
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
955
        (env->error_code & POWERPC_EXCP_FP)) {
956
        /* Differred floating-point exception after target FPR update */
957
        if (msr_fe0 != 0 || msr_fe1 != 0)
958
            helper_raise_exception_err(env->exception_index, env->error_code);
959
    }
960
#endif
961
}
962

    
963
#ifdef CONFIG_SOFTFLOAT
/* Clear any pending softfloat exception flags before an FP operation. */
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif
969

    
970
/* fadd - fadd. */
971
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
972
{
973
    CPU_DoubleU farg1, farg2;
974

    
975
    farg1.ll = arg1;
976
    farg2.ll = arg2;
977

    
978
    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
979
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
980
        /* Magnitude subtraction of infinities */
981
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
982
    } else {
983
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
984
                     float64_is_signaling_nan(farg2.d))) {
985
            /* sNaN addition */
986
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
987
        }
988
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
989
    }
990

    
991
    return farg1.ll;
992
}
993

    
994
/* fsub - fsub. */
995
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
996
{
997
    CPU_DoubleU farg1, farg2;
998

    
999
    farg1.ll = arg1;
1000
    farg2.ll = arg2;
1001

    
1002
    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
1003
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
1004
        /* Magnitude subtraction of infinities */
1005
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1006
    } else {
1007
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
1008
                     float64_is_signaling_nan(farg2.d))) {
1009
            /* sNaN subtraction */
1010
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1011
        }
1012
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1013
    }
1014

    
1015
    return farg1.ll;
1016
}
1017

    
1018
/* fmul - fmul. */
1019
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
1020
{
1021
    CPU_DoubleU farg1, farg2;
1022

    
1023
    farg1.ll = arg1;
1024
    farg2.ll = arg2;
1025

    
1026
    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1027
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1028
        /* Multiplication of zero by infinity */
1029
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1030
    } else {
1031
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
1032
                     float64_is_signaling_nan(farg2.d))) {
1033
            /* sNaN multiplication */
1034
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1035
        }
1036
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1037
    }
1038

    
1039
    return farg1.ll;
1040
}
1041

    
1042
/* fdiv - fdiv. */
1043
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
1044
{
1045
    CPU_DoubleU farg1, farg2;
1046

    
1047
    farg1.ll = arg1;
1048
    farg2.ll = arg2;
1049

    
1050
    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
1051
        /* Division of infinity by infinity */
1052
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
1053
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
1054
        /* Division of zero by zero */
1055
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
1056
    } else {
1057
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
1058
                     float64_is_signaling_nan(farg2.d))) {
1059
            /* sNaN division */
1060
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1061
        }
1062
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1063
    }
1064

    
1065
    return farg1.ll;
1066
}
1067

    
1068
/* fabs */
1069
uint64_t helper_fabs (uint64_t arg)
1070
{
1071
    CPU_DoubleU farg;
1072

    
1073
    farg.ll = arg;
1074
    farg.d = float64_abs(farg.d);
1075
    return farg.ll;
1076
}
1077

    
1078
/* fnabs */
1079
uint64_t helper_fnabs (uint64_t arg)
1080
{
1081
    CPU_DoubleU farg;
1082

    
1083
    farg.ll = arg;
1084
    farg.d = float64_abs(farg.d);
1085
    farg.d = float64_chs(farg.d);
1086
    return farg.ll;
1087
}
1088

    
1089
/* fneg */
1090
uint64_t helper_fneg (uint64_t arg)
1091
{
1092
    CPU_DoubleU farg;
1093

    
1094
    farg.ll = arg;
1095
    farg.d = float64_chs(farg.d);
1096
    return farg.ll;
1097
}
1098

    
1099
/* fctiw - fctiw. */
1100
uint64_t helper_fctiw (uint64_t arg)
1101
{
1102
    CPU_DoubleU farg;
1103
    farg.ll = arg;
1104

    
1105
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1106
        /* sNaN conversion */
1107
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1108
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1109
        /* qNan / infinity conversion */
1110
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1111
    } else {
1112
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
1113
        /* XXX: higher bits are not supposed to be significant.
1114
         *     to make tests easier, return the same as a real PowerPC 750
1115
         */
1116
        farg.ll |= 0xFFF80000ULL << 32;
1117
    }
1118
    return farg.ll;
1119
}
1120

    
1121
/* fctiwz - fctiwz. */
1122
uint64_t helper_fctiwz (uint64_t arg)
1123
{
1124
    CPU_DoubleU farg;
1125
    farg.ll = arg;
1126

    
1127
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1128
        /* sNaN conversion */
1129
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1130
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1131
        /* qNan / infinity conversion */
1132
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1133
    } else {
1134
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1135
        /* XXX: higher bits are not supposed to be significant.
1136
         *     to make tests easier, return the same as a real PowerPC 750
1137
         */
1138
        farg.ll |= 0xFFF80000ULL << 32;
1139
    }
1140
    return farg.ll;
1141
}
1142

    
1143
#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU f;

    /* Signed 64-bit integer to double-precision conversion */
    f.d = int64_to_float64(arg, &env->fp_status);
    return f.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU f;
    f.ll = arg;

    if (unlikely(float64_is_signaling_nan(f.d))) {
        /* sNaN conversion */
        f.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(f.d) || float64_is_infinity(f.d))) {
        /* qNan / infinity conversion */
        f.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        f.ll = float64_to_int64(f.d, &env->fp_status);
    }
    return f.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU f;
    f.ll = arg;

    if (unlikely(float64_is_signaling_nan(f.d))) {
        /* sNaN conversion */
        f.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(f.d) || float64_is_infinity(f.d))) {
        /* qNan / infinity conversion */
        f.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        /* Same as fctid but always truncates toward zero */
        f.ll = float64_to_int64_round_to_zero(f.d, &env->fp_status);
    }
    return f.ll;
}
#endif
1189

    
1190
/* Common helper for the frin/friz/frip/frim round-to-integer family. */
static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
{
    CPU_DoubleU f;
    f.ll = arg;

    if (unlikely(float64_is_signaling_nan(f.d))) {
        /* sNaN round */
        f.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(f.d) || float64_is_infinity(f.d))) {
        /* qNan / infinity round */
        f.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        /* Temporarily override the rounding mode for this one operation */
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        f.ll = float64_round_to_int(f.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return f.ll;
}
1209

    
1210
/* frin: round to nearest (ties to even) */
uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

/* friz: round toward zero */
uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

/* frip: round toward +infinity */
uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

/* frim: round toward -infinity */
uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
1229

    
1230
/* fmadd - fmadd. */
1231
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1232
{
1233
    CPU_DoubleU farg1, farg2, farg3;
1234

    
1235
    farg1.ll = arg1;
1236
    farg2.ll = arg2;
1237
    farg3.ll = arg3;
1238

    
1239
    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1240
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1241
        /* Multiplication of zero by infinity */
1242
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1243
    } else {
1244
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
1245
                     float64_is_signaling_nan(farg2.d) ||
1246
                     float64_is_signaling_nan(farg3.d))) {
1247
            /* sNaN operation */
1248
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1249
        }
1250
#ifdef FLOAT128
1251
        /* This is the way the PowerPC specification defines it */
1252
        float128 ft0_128, ft1_128;
1253

    
1254
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1255
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1256
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1257
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1258
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1259
            /* Magnitude subtraction of infinities */
1260
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1261
        } else {
1262
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1263
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1264
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1265
        }
1266
#else
1267
        /* This is OK on x86 hosts */
1268
        farg1.d = (farg1.d * farg2.d) + farg3.d;
1269
#endif
1270
    }
1271

    
1272
    return farg1.ll;
1273
}
1274

    
1275
/* fmsub - fmsub. */
1276
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1277
{
1278
    CPU_DoubleU farg1, farg2, farg3;
1279

    
1280
    farg1.ll = arg1;
1281
    farg2.ll = arg2;
1282
    farg3.ll = arg3;
1283

    
1284
    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1285
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1286
        /* Multiplication of zero by infinity */
1287
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1288
    } else {
1289
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
1290
                     float64_is_signaling_nan(farg2.d) ||
1291
                     float64_is_signaling_nan(farg3.d))) {
1292
            /* sNaN operation */
1293
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1294
        }
1295
#ifdef FLOAT128
1296
        /* This is the way the PowerPC specification defines it */
1297
        float128 ft0_128, ft1_128;
1298

    
1299
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1300
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1301
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1302
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1303
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1304
            /* Magnitude subtraction of infinities */
1305
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1306
        } else {
1307
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1308
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1309
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1310
        }
1311
#else
1312
        /* This is OK on x86 hosts */
1313
        farg1.d = (farg1.d * farg2.d) - farg3.d;
1314
#endif
1315
    }
1316
    return farg1.ll;
1317
}
1318

    
1319
/* fnmadd - fnmadd. */
1320
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1321
{
1322
    CPU_DoubleU farg1, farg2, farg3;
1323

    
1324
    farg1.ll = arg1;
1325
    farg2.ll = arg2;
1326
    farg3.ll = arg3;
1327

    
1328
    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1329
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1330
        /* Multiplication of zero by infinity */
1331
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1332
    } else {
1333
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
1334
                     float64_is_signaling_nan(farg2.d) ||
1335
                     float64_is_signaling_nan(farg3.d))) {
1336
            /* sNaN operation */
1337
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1338
        }
1339
#ifdef FLOAT128
1340
        /* This is the way the PowerPC specification defines it */
1341
        float128 ft0_128, ft1_128;
1342

    
1343
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1344
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1345
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1346
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1347
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1348
            /* Magnitude subtraction of infinities */
1349
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1350
        } else {
1351
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1352
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1353
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1354
        }
1355
#else
1356
        /* This is OK on x86 hosts */
1357
        farg1.d = (farg1.d * farg2.d) + farg3.d;
1358
#endif
1359
        if (likely(!float64_is_any_nan(farg1.d))) {
1360
            farg1.d = float64_chs(farg1.d);
1361
        }
1362
    }
1363
    return farg1.ll;
1364
}
1365

    
1366
/* fnmsub - fnmsub. */
1367
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1368
{
1369
    CPU_DoubleU farg1, farg2, farg3;
1370

    
1371
    farg1.ll = arg1;
1372
    farg2.ll = arg2;
1373
    farg3.ll = arg3;
1374

    
1375
    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1376
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1377
        /* Multiplication of zero by infinity */
1378
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1379
    } else {
1380
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
1381
                     float64_is_signaling_nan(farg2.d) ||
1382
                     float64_is_signaling_nan(farg3.d))) {
1383
            /* sNaN operation */
1384
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1385
        }
1386
#ifdef FLOAT128
1387
        /* This is the way the PowerPC specification defines it */
1388
        float128 ft0_128, ft1_128;
1389

    
1390
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1391
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1392
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1393
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1394
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1395
            /* Magnitude subtraction of infinities */
1396
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1397
        } else {
1398
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1399
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1400
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1401
        }
1402
#else
1403
        /* This is OK on x86 hosts */
1404
        farg1.d = (farg1.d * farg2.d) - farg3.d;
1405
#endif
1406
        if (likely(!float64_is_any_nan(farg1.d))) {
1407
            farg1.d = float64_chs(farg1.d);
1408
        }
1409
    }
1410
    return farg1.ll;
1411
}
1412

    
1413
/* frsp - frsp. */
1414
uint64_t helper_frsp (uint64_t arg)
1415
{
1416
    CPU_DoubleU farg;
1417
    float32 f32;
1418
    farg.ll = arg;
1419

    
1420
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1421
        /* sNaN square root */
1422
       fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1423
    }
1424
    f32 = float64_to_float32(farg.d, &env->fp_status);
1425
    farg.d = float32_to_float64(f32, &env->fp_status);
1426

    
1427
    return farg.ll;
1428
}
1429

    
1430
/* fsqrt - fsqrt. */
1431
uint64_t helper_fsqrt (uint64_t arg)
1432
{
1433
    CPU_DoubleU farg;
1434
    farg.ll = arg;
1435

    
1436
    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1437
        /* Square root of a negative nonzero number */
1438
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1439
    } else {
1440
        if (unlikely(float64_is_signaling_nan(farg.d))) {
1441
            /* sNaN square root */
1442
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1443
        }
1444
        farg.d = float64_sqrt(farg.d, &env->fp_status);
1445
    }
1446
    return farg.ll;
1447
}
1448

    
1449
/* fre - fre. */
1450
uint64_t helper_fre (uint64_t arg)
1451
{
1452
    CPU_DoubleU farg;
1453
    farg.ll = arg;
1454

    
1455
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1456
        /* sNaN reciprocal */
1457
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1458
    }
1459
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1460
    return farg.d;
1461
}
1462

    
1463
/* fres - fres. */
1464
uint64_t helper_fres (uint64_t arg)
1465
{
1466
    CPU_DoubleU farg;
1467
    float32 f32;
1468
    farg.ll = arg;
1469

    
1470
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1471
        /* sNaN reciprocal */
1472
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1473
    }
1474
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1475
    f32 = float64_to_float32(farg.d, &env->fp_status);
1476
    farg.d = float32_to_float64(f32, &env->fp_status);
1477

    
1478
    return farg.ll;
1479
}
1480

    
1481
/* frsqrte  - frsqrte. */
1482
uint64_t helper_frsqrte (uint64_t arg)
1483
{
1484
    CPU_DoubleU farg;
1485
    float32 f32;
1486
    farg.ll = arg;
1487

    
1488
    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1489
        /* Reciprocal square root of a negative nonzero number */
1490
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1491
    } else {
1492
        if (unlikely(float64_is_signaling_nan(farg.d))) {
1493
            /* sNaN reciprocal square root */
1494
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1495
        }
1496
        farg.d = float64_sqrt(farg.d, &env->fp_status);
1497
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1498
        f32 = float64_to_float32(farg.d, &env->fp_status);
1499
        farg.d = float32_to_float64(f32, &env->fp_status);
1500
    }
1501
    return farg.ll;
1502
}
1503

    
1504
/* fsel - fsel. */
1505
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1506
{
1507
    CPU_DoubleU farg1;
1508

    
1509
    farg1.ll = arg1;
1510

    
1511
    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_any_nan(farg1.d)) {
1512
        return arg2;
1513
    } else {
1514
        return arg3;
1515
    }
1516
}
1517

    
1518
/* fcmpu: unordered floating-point compare; updates CR field and FPSCR[FPRF] */
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU fa, fb;
    uint32_t cc;

    fa.ll = arg1;
    fb.ll = arg2;

    if (unlikely(float64_is_any_nan(fa.d) || float64_is_any_nan(fb.d))) {
        cc = 0x01UL;                                    /* unordered */
    } else if (float64_lt(fa.d, fb.d, &env->fp_status)) {
        cc = 0x08UL;                                    /* less than */
    } else if (!float64_le(fa.d, fb.d, &env->fp_status)) {
        cc = 0x04UL;                                    /* greater than */
    } else {
        cc = 0x02UL;                                    /* equal */
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[crfD] = cc;
    if (unlikely(cc == 0x01UL &&
                 (float64_is_signaling_nan(fa.d) ||
                  float64_is_signaling_nan(fb.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}
1546

    
1547
/* fcmpo: ordered floating-point compare; any NaN raises VXVC as well */
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU fa, fb;
    uint32_t cc;

    fa.ll = arg1;
    fb.ll = arg2;

    if (unlikely(float64_is_any_nan(fa.d) || float64_is_any_nan(fb.d))) {
        cc = 0x01UL;                                    /* unordered */
    } else if (float64_lt(fa.d, fb.d, &env->fp_status)) {
        cc = 0x08UL;                                    /* less than */
    } else if (!float64_le(fa.d, fb.d, &env->fp_status)) {
        cc = 0x04UL;                                    /* greater than */
    } else {
        cc = 0x02UL;                                    /* equal */
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[crfD] = cc;
    if (unlikely (cc == 0x01UL)) {
        if (float64_is_signaling_nan(fa.d) ||
            float64_is_signaling_nan(fb.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
1580

    
1581
#if !defined (CONFIG_USER_ONLY)
1582
void helper_store_msr (target_ulong val)
1583
{
1584
    val = hreg_store_msr(env, val, 0);
1585
    if (val != 0) {
1586
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1587
        helper_raise_exception(val);
1588
    }
1589
}
1590

    
1591
/* Common return-from-interrupt tail: restore nip and (masked) MSR.
 * keep_msrh preserves the top 32 bits of the current MSR in 32-bit mode. */
static inline void do_rfi(target_ulong nip, target_ulong msr,
                          target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        /* Returning to 64-bit mode: use the full values */
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        /* Returning to 32-bit mode: truncate both */
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh) {
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
        }
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
1619

    
1620
void helper_rfi (void)
1621
{
1622
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1623
           ~((target_ulong)0x783F0000), 1);
1624
}
1625

    
1626
#if defined(TARGET_PPC64)
/* rfid: 64-bit return from interrupt; the MSR top half is not preserved */
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 0);
}

/* hrfid: return from hypervisor interrupt using HSRR0/HSRR1 */
void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0x783F0000), 0);
}
#endif
#endif
1640

    
1641
/* tw: trap word — raise a TRAP program exception if any enabled
 * comparison between arg1 and arg2 holds (flags select the conditions). */
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    int32_t a = arg1, b = arg2;
    uint32_t ua = arg1, ub = arg2;

    if (unlikely((a < b && (flags & 0x10)) ||
                 (a > b && (flags & 0x08)) ||
                 (a == b && (flags & 0x04)) ||
                 (ua < ub && (flags & 0x02)) ||
                 (ua > ub && (flags & 0x01)))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1651

    
1652
#if defined(TARGET_PPC64)
/* td: trap doubleword — 64-bit variant of tw */
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    int64_t a = arg1, b = arg2;
    uint64_t ua = arg1, ub = arg2;

    if (unlikely((a < b && (flags & 0x10)) ||
                 (a > b && (flags & 0x08)) ||
                 (a == b && (flags & 0x04)) ||
                 (ua < ub && (flags & 0x02)) ||
                 (ua > ub && (flags & 0x01)))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
#endif
1663

    
1664
/*****************************************************************************/
1665
/* PowerPC 601 specific instructions (POWER bridge) */
1666

    
1667
/* clcs: return a cache line size selected by arg.
 * Fix: removed the unreachable break statements that followed each return. */
target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
    default:
        /* Undefined */
        return 0;
    }
}
1694

    
1695
/* div (POWER bridge): 64-bit dividend is arg1:MQ; remainder goes to MQ */
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t dividend = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)dividend == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        /* Overflow or divide by zero */
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->spr[SPR_MQ] = dividend % arg2;
    return dividend / (int32_t)arg2;
}
1708

    
1709
/* divo (POWER bridge): like div, but also updates XER[OV]/XER[SO] */
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t dividend = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)dividend == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        /* Overflow or divide by zero */
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->spr[SPR_MQ] = dividend % arg2;
    dividend /= (int32_t)arg2;
    if ((int32_t)dividend != dividend) {
        /* Quotient does not fit in 32 bits */
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    } else {
        env->xer &= ~(1 << XER_OV);
    }
    return dividend;
}
1729

    
1730
/* divs (POWER bridge): signed 32-bit divide; remainder goes to MQ */
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        /* Overflow or divide by zero */
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
    return (int32_t)arg1 / (int32_t)arg2;
}
1741

    
1742
/* divso (POWER bridge): like divs, but also updates XER[OV]/XER[SO] */
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        /* Overflow or divide by zero */
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->xer &= ~(1 << XER_OV);
    env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
    return (int32_t)arg1 / (int32_t)arg2;
}
1755

    
1756
#if !defined (CONFIG_USER_ONLY)
1757
target_ulong helper_rac (target_ulong addr)
1758
{
1759
    mmu_ctx_t ctx;
1760
    int nb_BATs;
1761
    target_ulong ret = 0;
1762

    
1763
    /* We don't have to generate many instances of this instruction,
1764
     * as rac is supervisor only.
1765
     */
1766
    /* XXX: FIX THIS: Pretend we have no BAT */
1767
    nb_BATs = env->nb_BATs;
1768
    env->nb_BATs = 0;
1769
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
1770
        ret = ctx.raddr;
1771
    env->nb_BATs = nb_BATs;
1772
    return ret;
1773
}
1774

    
1775
void helper_rfsvc (void)
1776
{
1777
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1778
}
1779
#endif
1780

    
1781
/*****************************************************************************/
1782
/* 602 specific instructions */
1783
/* mfrom is the most crazy instruction ever seen, imho ! */
1784
/* Real implementation uses a ROM table. Do the same */
1785
/* Extremly decomposed:
1786
 *                      -arg / 256
1787
 * return 256 * log10(10           + 1.0) + 0.5
1788
 */
1789
#if !defined (CONFIG_USER_ONLY)
1790
target_ulong helper_602_mfrom (target_ulong arg)
1791
{
1792
    if (likely(arg < 602)) {
1793
#include "mfrom_table.c"
1794
        return mfrom_ROM_table[arg];
1795
    } else {
1796
        return 0;
1797
    }
1798
}
1799
#endif
1800

    
1801
/*****************************************************************************/
1802
/* Embedded PowerPC specific helpers */
1803

    
1804
/* XXX: to be improved to check access rights when in user-mode */
1805
/* Read a device control register; raises a program exception on a
 * missing DCR environment or a failed read. */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    uint32_t val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}
1820

    
1821
/* Write a device control register; raises a program exception on a
 * missing DCR environment or a failed write. */
void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
1833

    
1834
#if !defined(CONFIG_USER_ONLY)
1835
void helper_40x_rfci (void)
1836
{
1837
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1838
           ~((target_ulong)0xFFFF0000), 0);
1839
}
1840

    
1841
void helper_rfci (void)
1842
{
1843
    do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1844
           ~((target_ulong)0x3FFF0000), 0);
1845
}
1846

    
1847
void helper_rfdi (void)
1848
{
1849
    do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1850
           ~((target_ulong)0x3FFF0000), 0);
1851
}
1852

    
1853
void helper_rfmci (void)
1854
{
1855
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1856
           ~((target_ulong)0x3FFF0000), 0);
1857
}
1858
#endif
1859

    
1860
/* 440 specific */
1861
/* 440 dlmzb: determine the length of the byte string formed by HIGH
 * then LOW, up to and including the first zero byte.  The count is
 * written to XER[0:6] and returned; when UPDATE_RC is set, CR0 encodes
 * whether the terminator was found in HIGH (0x4), LOW (0x8) or not at
 * all (0x2), OR'ed with XER[SO]. */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    const target_ulong words[2] = { high, low };
    const int zero_crf[2] = { 0x4, 0x8 };    /* CR0 value per source word */
    target_ulong mask;
    int w;
    int count = 1;

    for (w = 0; w < 2; w++) {
        /* Scan the word one byte at a time, most-significant first. */
        for (mask = 0xFF000000; mask != 0; mask >>= 8) {
            if ((words[w] & mask) == 0) {
                if (update_Rc) {
                    env->crf[0] = zero_crf[w];
                }
                goto done;
            }
            count++;
        }
    }
    /* No zero byte in either word. */
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | count;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return count;
}
1895

    
1896
/*****************************************************************************/
1897
/* Altivec extension helpers */
1898
/* Index of the most/least significant half of a two-element array that
 * holds one target value, as seen from the host's byte order. */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/* Iterate over the elements of vector register R in target (big-endian)
 * element order, regardless of host byte order. */
#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif
1913

    
1914
/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                \
    if (float32_is_any_nan(x)) {                                \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

/* HANDLE_NANn: chain DO_HANDLE_NAN over n operands; the trailing block
 * runs only when none of the operands is a NaN. */
#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
1930

    
1931
/* Saturating arithmetic helpers.  */
/* SATCVT: narrow a signed value, clamping to [min, max] and flagging
 * saturation through *sat. */
#define SATCVT(from, to, from_type, to_type, min, max)                  \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x < (from_type)min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (x > (from_type)max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
/* SATCVTU: narrow an unsigned value; only the upper bound can clamp. */
#define SATCVTU(from, to, from_type, to_type, min, max)                 \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x > (from_type)max) {                                       \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
/* signed -> narrower signed */
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

/* unsigned -> narrower unsigned */
SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
/* signed -> narrower unsigned (negative values clamp to 0) */
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU
1971

    
1972
/* lvebx/lvehx/lvewx: load one element into the vector register slot
 * selected by the low nibble of the effective address; the element is
 * byte-swapped when MSR[LE] is set. */
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if(msr_le) {                                            \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
#define I(x) (x)    /* identity "swap" for byte loads */
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE
1991

    
1992
/* lvsl: build a left-shift permute control vector — consecutive byte
 * values starting at sh & 0xf, in target element order. */
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int val = sh & 0xf;

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = val;
        val++;
    }
}
2000

    
2001
/* lvsr: build a right-shift permute control vector — consecutive byte
 * values starting at 0x10 - (sh & 0xf), in target element order. */
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int val = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = val;
        val++;
    }
}
2009

    
2010
#define STVE(name, access, swap, element)                       \
2011
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
2012
    {                                                           \
2013
        size_t n_elems = ARRAY_SIZE(r->element);                \
2014
        int adjust = HI_IDX*(n_elems-1);                        \
2015
        int sh = sizeof(r->element[0]) >> 1;                    \
2016
        int index = (addr & 0xf) >> sh;                         \
2017
        if(msr_le) {                                            \
2018
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2019
        } else {                                                        \
2020
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2021
        }                                                               \
2022
    }
2023
#define I(x) (x)
2024
STVE(stvebx, stb, I, u8)
2025
STVE(stvehx, stw, bswap16, u16)
2026
STVE(stvewx, stl, bswap32, u32)
2027
#undef I
2028
#undef LVE
2029

    
2030
/* mtvscr: copy the low word of the source vector into VSCR and update
 * the softfloat flush-to-zero mode from VSCR[NJ]. */
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    const int vscr_word = 3;
#else
    const int vscr_word = 0;
#endif
    env->vscr = r->u32[vscr_word];
    set_flush_to_zero(vscr_nj, &env->vec_status);
}
2039

    
2040
/* vaddcuw: per-word carry-out of the unsigned addition a + b (1 when
 * the 32-bit sum wraps, else 0). */
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        /* a + b carries out iff the wrapped sum is below either operand;
         * equivalent to the ~a < b formulation. */
        uint32_t sum = a->u32[i] + b->u32[i];
        r->u32[i] = sum < a->u32[i];
    }
}
2047

    
2048
/* Element-wise modulo (non-saturating) integer add/subtract.
 * Instantiates helper_vaddu{b,h,w}m and helper_vsubu{b,h,w}m. */
#define VARITH_DO(name, op, element)        \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
{                                                                       \
    int i;                                                              \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
        r->element[i] = a->element[i] op b->element[i];                 \
    }                                                                   \
}
#define VARITH(suffix, element)                  \
  VARITH_DO(add##suffix, +, element)             \
  VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH
2064

    
2065
/* Element-wise single-precision FP add/subtract; NaN operands are
 * propagated as QNaNs by HANDLE_NAN2 instead of being operated on. */
#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP
2078

    
2079
/* Saturating integer add/subtract: compute in the wider type OPTYPE,
 * then narrow with a cvt* saturating helper; VSCR[SAT] is set sticky
 * when any element saturates. */
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)    \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)       \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)     \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
2117

    
2118
/* Element-wise rounded average: (a + b + 1) >> 1, computed in the wider
 * type ETYPE so the intermediate sum cannot overflow. */
#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
2136

    
2137
/* vcfux/vcfsx: convert each (un)signed 32-bit integer element to
 * single-precision FP, then scale by 2^-uim. */
#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn (t, -uim, &env->vec_status);       \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF
2149

    
2150
/* Element-wise integer compares: each result element is all-ones when
 * the predicate holds, else zero.  The "_dot" (record) forms also set
 * CR6 to 8 when the predicate held for all elements and 2 when it held
 * for none. */
#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
            switch (sizeof (a->element[0])) {                           \
            case 4: r->u32[i] = result; break;                          \
            case 2: r->u16[i] = result; break;                          \
            case 1: r->u8[i] = result; break;                           \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP
2185

    
2186
/* Element-wise FP compares.  Unordered (NaN) comparisons always yield
 * zero; otherwise the element is all-ones when "rel compare order"
 * holds.  Note vcmpgefp is expressed as "rel != less" (unordered having
 * been filtered out already).  The "_dot" forms update CR6 as for the
 * integer compares. */
#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            uint32_t result;                                            \
            int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)           \
    VCMPFP_DO(suffix, compare, order, 0)         \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP
2219

    
2220
/* vcmpbfp core: per element, set bit 31 when a > b and bit 30 when
 * a < -b (i.e. a is outside the bounds [-b, b]); unordered operands set
 * both bits.  When RECORD is set, CR6 bit 1 reports that every element
 * was in bounds. */
static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                                    int record)
{
    int i;
    int all_in = 0;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here.  */
        } else {
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;   /* a <= b   */
            int ge = ge_rel != float_relation_less;      /* a >= -b  */
            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}
2243

    
2244
/* vcmpbfp: bounds compare without CR6 update. */
void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 0);
}
2248

    
2249
/* vcmpbfp.: bounds compare with CR6 update. */
void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 1);
}
2253

    
2254
/* vctuxs/vctsxs: convert each float element, scaled by 2^uim, to a
 * saturated (un)signed 32-bit integer, truncating toward zero.  The
 * conversion goes through float64/int64 so scaling cannot lose bits
 * before the final saturating narrow; NaN inputs produce 0.  VSCR[SAT]
 * is set when any element saturates. */
#define VCT(suffix, satcvt, element)                                    \
    void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(float_round_to_zero, &s);               \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            if (float32_is_any_nan(b->f[i])) {                          \
                r->element[i] = 0;                                      \
            } else {                                                    \
                float64 t = float32_to_float64(b->f[i], &s);            \
                int64_t j;                                              \
                t = float64_scalbn(t, uim, &s);                         \
                j = float64_to_int64(t, &s);                            \
                r->element[i] = satcvt(j, &sat);                        \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
2279

    
2280
/* vmaddfp: fused-style r = (a * c) + b per element, with NaN operands
 * propagated as QNaNs. */
void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end.  */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_add(t, bf, &env->vec_status);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
2297

    
2298
/* vmhaddshs: per halfword, r = sat((a * b) >> 15 + c); VSCR[SAT] set
 * sticky when any element saturates. */
void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2313

    
2314
/* vmhraddshs: as vmhaddshs but with rounding — 0x4000 is added to the
 * product before the >> 15 shift. */
void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2329

    
2330
/* Element-wise integer min/max: min keeps the operand for which a > b
 * is false, max the one for which a < b is false. */
#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
  VMINMAX_DO(min##suffix, >, element)           \
  VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX
2353

    
2354
/* Element-wise FP min/max, selecting rT when a < b (quiet compare) and
 * rF otherwise; NaN operands are propagated as QNaNs by HANDLE_NAN2. */
#define VMINMAXFP(suffix, rT, rF)                                       \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
                    r->f[i] = rT->f[i];                                 \
                } else {                                                \
                    r->f[i] = rF->f[i];                                 \
                }                                                       \
            }                                                           \
        }                                                               \
    }
VMINMAXFP(minfp, a, b)
VMINMAXFP(maxfp, b, a)
#undef VMINMAXFP
2371

    
2372
/* vmladduhm: per halfword, r = low 16 bits of (a * b + c). */
void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t sum = a->s16[i] * b->s16[i] + c->s16[i];
        r->s16[i] = (int16_t)sum;
    }
}
2380

    
2381
/* vmrgh*/
/* vmrgl*: interleave the corresponding halves of a and b into r.  The
 * result is built in a temporary so r may alias a or b.  MRGHI/MRGLO
 * are host-endian-relative selectors feeding the HIGHP flag; note the
 * mrgl forms are instantiated with MRGHI and vice versa, which together
 * with the index arithmetic produces the architected element order. */
#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        for (i = 0; i < n_elems/2; i++) {                               \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
  VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO
2415

    
2416
/* vmsummbm: multiply signed bytes of a by unsigned bytes of b, then for
 * each word sum the four products into the corresponding word of c. */
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}
2429

    
2430
/* vmsumshm: multiply signed halfwords pairwise, then per word add the
 * two products to the corresponding word of c (modulo arithmetic). */
void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
    }
}
2443

    
2444
/* vmsumshs: as vmsumshm but the per-word accumulation is done in 64
 * bits and saturated to int32; VSCR[SAT] set sticky on saturation. */
void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2463

    
2464
/* vmsumubm: multiply unsigned bytes pairwise, then per word sum the
 * four products into the corresponding word of c. */
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}
2477

    
2478
/* vmsumuhm: multiply unsigned halfwords pairwise, then per word add the
 * two products to the corresponding word of c (modulo arithmetic). */
void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
    }
}
2491

    
2492
/* vmsumuhs: as vmsumuhm but the per-word accumulation is done in 64
 * bits and saturated to uint32; VSCR[SAT] set sticky on saturation. */
void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2511

    
2512
/* vmule*/
/* vmulo*: multiply the even (or odd) numbered elements of a and b into
 * double-width product elements of r. */
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element) \
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
2533

    
2534
/* vnmsubfp: negative multiply-subtract, r = -(a * c - b), per element.  */
void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end.  */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_sub(t, bf, &env->vec_status);
            t = float64_chs(t);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
2552

    
2553
/* vperm: permute bytes of the a:b pair as selected by c.  The low 5 bits of
 * each control byte pick a byte: bit 4 selects b over a, the low 4 bits are
 * the byte index (mirrored on little-endian hosts).  A temporary is used so
 * that r may alias a, b or c.  */
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;
    VECTOR_FOR_INORDER_I (i, u8) {
        int s = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int index = s & 0xf;
#else
        int index = 15 - (s & 0xf);
#endif
        if (s & 0x10) {
            result.u8[i] = b->u8[index];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
    *r = result;
}
2572

    
2573
#if defined(HOST_WORDS_BIGENDIAN)
2574
#define PKBIG 1
2575
#else
2576
#define PKBIG 0
2577
#endif
2578
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2579
{
2580
    int i, j;
2581
    ppc_avr_t result;
2582
#if defined(HOST_WORDS_BIGENDIAN)
2583
    const ppc_avr_t *x[2] = { a, b };
2584
#else
2585
    const ppc_avr_t *x[2] = { b, a };
2586
#endif
2587

    
2588
    VECTOR_FOR_INORDER_I (i, u64) {
2589
        VECTOR_FOR_INORDER_I (j, u32){
2590
            uint32_t e = x[i]->u32[j];
2591
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
2592
                                 ((e >> 6) & 0x3e0) |
2593
                                 ((e >> 3) & 0x1f));
2594
        }
2595
    }
2596
    *r = result;
2597
}
2598

    
2599
#define VPK(suffix, from, to, cvt, dosat)       \
2600
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2601
    {                                                                   \
2602
        int i;                                                          \
2603
        int sat = 0;                                                    \
2604
        ppc_avr_t result;                                               \
2605
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
2606
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
2607
        VECTOR_FOR_INORDER_I (i, from) {                                \
2608
            result.to[i] = cvt(a0->from[i], &sat);                      \
2609
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
2610
        }                                                               \
2611
        *r = result;                                                    \
2612
        if (dosat && sat) {                                             \
2613
            env->vscr |= (1 << VSCR_SAT);                               \
2614
        }                                                               \
2615
    }
2616
#define I(x, y) (x)
2617
VPK(shss, s16, s8, cvtshsb, 1)
2618
VPK(shus, s16, u8, cvtshub, 1)
2619
VPK(swss, s32, s16, cvtswsh, 1)
2620
VPK(swus, s32, u16, cvtswuh, 1)
2621
VPK(uhus, u16, u8, cvtuhub, 1)
2622
VPK(uwus, u32, u16, cvtuwuh, 1)
2623
VPK(uhum, u16, u8, I, 0)
2624
VPK(uwum, u32, u16, I, 0)
2625
#undef I
2626
#undef VPK
2627
#undef PKBIG
2628

    
2629
/* vrefp: per-element reciprocal estimate (computed exactly as 1/x here).  */
void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
        }
    }
}

/* vrfin/vrfim/vrfip/vrfiz: round to integral value with the given rounding
 * mode.  A local float_status copy is used so the forced rounding mode does
 * not leak into env->vec_status.  */
#define VRFI(suffix, rounding)                                          \
    void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(rounding, &s);                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN1(r->f[i], b->f[i]) {                             \
                r->f[i] = float32_round_to_int (b->f[i], &s);           \
            }                                                           \
        }                                                               \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI

/* vrlb/vrlh/vrlw: rotate each element left by the shift count taken from
 * the low log2(width) bits of the corresponding element of b.  */
#define VROTATE(suffix, element)                                        \
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
        }                                                               \
    }
VROTATE(b, u8)
VROTATE(h, u16)
VROTATE(w, u32)
#undef VROTATE
2671

    
2672
/* vrsqrtefp: per-element reciprocal square-root estimate (exact 1/sqrt).  */
void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            float32 t = float32_sqrt(b->f[i], &env->vec_status);
            r->f[i] = float32_div(float32_one, t, &env->vec_status);
        }
    }
}

/* vsel: bitwise select, c chooses b where set and a where clear.  */
void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
    r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
}

/* vexptefp: per-element 2**x estimate.  */
void helper_vexptefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_exp2(b->f[i], &env->vec_status);
        }
    }
}

/* vlogefp: per-element log2(x) estimate.  */
void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_log2(b->f[i], &env->vec_status);
        }
    }
}
2708

    
2709
#if defined(HOST_WORDS_BIGENDIAN)
2710
#define LEFT 0
2711
#define RIGHT 1
2712
#else
2713
#define LEFT 1
2714
#define RIGHT 0
2715
#endif
2716
/* The specification says that the results are undefined if all of the
2717
 * shift counts are not identical.  We check to make sure that they are
2718
 * to conform to what real hardware appears to do.  */
2719
#define VSHIFT(suffix, leftp)                                           \
2720
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
2721
    {                                                                   \
2722
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
2723
        int doit = 1;                                                   \
2724
        int i;                                                          \
2725
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
2726
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
2727
        }                                                               \
2728
        if (doit) {                                                     \
2729
            if (shift == 0) {                                           \
2730
                *r = *a;                                                \
2731
            } else if (leftp) {                                         \
2732
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
2733
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
2734
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
2735
            } else {                                                    \
2736
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
2737
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
2738
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
2739
            }                                                           \
2740
        }                                                               \
2741
    }
2742
VSHIFT(l, LEFT)
2743
VSHIFT(r, RIGHT)
2744
#undef VSHIFT
2745
#undef LEFT
2746
#undef RIGHT
2747

    
2748
#define VSL(suffix, element)                                            \
2749
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2750
    {                                                                   \
2751
        int i;                                                          \
2752
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2753
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2754
            unsigned int shift = b->element[i] & mask;                  \
2755
            r->element[i] = a->element[i] << shift;                     \
2756
        }                                                               \
2757
    }
2758
VSL(b, u8)
2759
VSL(h, u16)
2760
VSL(w, u32)
2761
#undef VSL
2762

    
2763
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
2764
{
2765
    int sh = shift & 0xf;
2766
    int i;
2767
    ppc_avr_t result;
2768

    
2769
#if defined(HOST_WORDS_BIGENDIAN)
2770
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2771
        int index = sh + i;
2772
        if (index > 0xf) {
2773
            result.u8[i] = b->u8[index-0x10];
2774
        } else {
2775
            result.u8[i] = a->u8[index];
2776
        }
2777
    }
2778
#else
2779
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2780
        int index = (16 - sh) + i;
2781
        if (index > 0xf) {
2782
            result.u8[i] = a->u8[index-0x10];
2783
        } else {
2784
            result.u8[i] = b->u8[index];
2785
        }
2786
    }
2787
#endif
2788
    *r = result;
2789
}
2790

    
2791
/* vslo: shift the whole vector left by octets; the byte count comes from
 * bits 121:124 of b.  Vacated bytes are zeroed.  */
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
    memmove (&r->u8[0], &a->u8[sh], 16-sh);
    memset (&r->u8[16-sh], 0, sh);
#else
    memmove (&r->u8[sh], &a->u8[0], 16-sh);
    memset (&r->u8[0], 0, sh);
#endif
}

/* Experimental testing shows that hardware masks the immediate.  */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(HOST_WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
#endif
/* vspltb/vsplth/vspltw: replicate the selected element of b into every
 * element of r.  */
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED

/* vspltisb/vspltish/vspltisw: splat a sign-extended 5-bit immediate.  */
#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
    {                                                           \
        /* Sign-extend the low 5 bits of the immediate.  */     \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI
2840

    
2841
#define VSR(suffix, element)                                            \
2842
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2843
    {                                                                   \
2844
        int i;                                                          \
2845
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2846
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2847
            unsigned int shift = b->element[i] & mask;                  \
2848
            r->element[i] = a->element[i] >> shift;                     \
2849
        }                                                               \
2850
    }
2851
VSR(ab, s8)
2852
VSR(ah, s16)
2853
VSR(aw, s32)
2854
VSR(b, u8)
2855
VSR(h, u16)
2856
VSR(w, u32)
2857
#undef VSR
2858

    
2859
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2860
{
2861
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2862

    
2863
#if defined (HOST_WORDS_BIGENDIAN)
2864
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
2865
  memset (&r->u8[0], 0, sh);
2866
#else
2867
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
2868
  memset (&r->u8[16-sh], 0, sh);
2869
#endif
2870
}
2871

    
2872
/* vsubcuw: per-word carry-out of a - b (1 when no borrow occurs).  */
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = a->u32[i] >= b->u32[i];
    }
}

/* vsumsws: sum all signed words of a with the last word of b, saturate the
 * 64-bit total into the last word of r and zero the others.  Sets
 * VSCR[SAT] on saturation.  */
void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2904

    
2905
/* vsum2sws: for each doubleword, sum its two signed words of a with the
 * corresponding odd word of b, saturating into that word of r (the other
 * word of each doubleword is zeroed).  Sets VSCR[SAT] on saturation.  */
void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper+i*2];
        result.u64[i] = 0;
        /* ARRAY_SIZE(r->u64) == 2: the two s32 halves of this u64.  */
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2*i+j];
        }
        result.s32[upper+i*2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

/* vsum4sbs: for each word, sum its four signed bytes of a with the signed
 * word of b, saturating.  Sets VSCR[SAT] on saturation.  */
void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4*i+j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

/* vsum4shs: for each word, sum its two signed halfwords of a with the
 * signed word of b, saturating.  Sets VSCR[SAT] on saturation.  */
void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        t += a->s16[2*i] + a->s16[2*i+1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

/* vsum4ubs: for each word, sum its four unsigned bytes of a with the
 * unsigned word of b, saturating.  Sets VSCR[SAT] on saturation.  */
void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4*i+j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2982

    
2983
#if defined(HOST_WORDS_BIGENDIAN)
2984
#define UPKHI 1
2985
#define UPKLO 0
2986
#else
2987
#define UPKHI 0
2988
#define UPKLO 1
2989
#endif
2990
#define VUPKPX(suffix, hi)                                      \
2991
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
2992
    {                                                           \
2993
        int i;                                                  \
2994
        ppc_avr_t result;                                       \
2995
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
2996
            uint16_t e = b->u16[hi ? i : i+4];                  \
2997
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
2998
            uint8_t r = (e >> 10) & 0x1f;                       \
2999
            uint8_t g = (e >> 5) & 0x1f;                        \
3000
            uint8_t b = e & 0x1f;                               \
3001
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
3002
        }                                                               \
3003
        *r = result;                                                    \
3004
    }
3005
VUPKPX(lpx, UPKLO)
3006
VUPKPX(hpx, UPKHI)
3007
#undef VUPKPX
3008

    
3009
#define VUPK(suffix, unpacked, packee, hi)                              \
3010
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
3011
    {                                                                   \
3012
        int i;                                                          \
3013
        ppc_avr_t result;                                               \
3014
        if (hi) {                                                       \
3015
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
3016
                result.unpacked[i] = b->packee[i];                      \
3017
            }                                                           \
3018
        } else {                                                        \
3019
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
3020
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
3021
            }                                                           \
3022
        }                                                               \
3023
        *r = result;                                                    \
3024
    }
3025
VUPK(hsb, s16, s8, UPKHI)
3026
VUPK(hsh, s32, s16, UPKHI)
3027
VUPK(lsb, s16, s8, UPKLO)
3028
VUPK(lsh, s32, s16, UPKLO)
3029
#undef VUPK
3030
#undef UPKHI
3031
#undef UPKLO
3032

    
3033
/* End of the AltiVec helpers: retire the local helper macros.  */
#undef DO_HANDLE_NAN
#undef HANDLE_NAN1
#undef HANDLE_NAN2
#undef HANDLE_NAN3
#undef VECTOR_FOR_INORDER_I
#undef HI_IDX
#undef LO_IDX
3040

    
3041
/*****************************************************************************/
3042
/* SPE extension helpers */
3043
/* Use a table to make this quicker */
3044
/* Nibble bit-reversal lookup table: hbrev[n] is n with its 4 bits
 * reversed.  const: the table is never written.  */
static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

/* Reverse the bit order of a byte using two nibble lookups.  */
static inline uint8_t byte_reverse(uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

/* Reverse the bit order of a 32-bit word.  */
static inline uint32_t word_reverse(uint32_t val)
{
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
}
3059

    
3060
#define MASKBITS 16 // Random value - to be fixed (implementation dependant)
3061
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
3062
{
3063
    uint32_t a, b, d, mask;
3064

    
3065
    mask = UINT32_MAX >> (32 - MASKBITS);
3066
    a = arg1 & mask;
3067
    b = arg2 & mask;
3068
    d = word_reverse(1 + word_reverse(a | ~b));
3069
    return (arg1 & ~mask) | (d & b);
3070
}
3071

    
3072
/* Count leading sign bits of a 32-bit value (leading ones for negative
 * values, leading zeros otherwise).  */
uint32_t helper_cntlsw32 (uint32_t val)
{
    if (val & 0x80000000) {
        return clz32(~val);
    } else {
        return clz32(val);
    }
}

/* Count leading zeros of a 32-bit value.  */
uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}
3084

    
3085
/* Single-precision floating-point conversions */
3086
static inline uint32_t efscfsi(uint32_t val)
3087
{
3088
    CPU_FloatU u;
3089

    
3090
    u.f = int32_to_float32(val, &env->vec_status);
3091

    
3092
    return u.l;
3093
}
3094

    
3095
static inline uint32_t efscfui(uint32_t val)
3096
{
3097
    CPU_FloatU u;
3098

    
3099
    u.f = uint32_to_float32(val, &env->vec_status);
3100

    
3101
    return u.l;
3102
}
3103

    
3104
static inline int32_t efsctsi(uint32_t val)
3105
{
3106
    CPU_FloatU u;
3107

    
3108
    u.l = val;
3109
    /* NaN are not treated the same way IEEE 754 does */
3110
    if (unlikely(float32_is_quiet_nan(u.f)))
3111
        return 0;
3112

    
3113
    return float32_to_int32(u.f, &env->vec_status);
3114
}
3115

    
3116
static inline uint32_t efsctui(uint32_t val)
3117
{
3118
    CPU_FloatU u;
3119

    
3120
    u.l = val;
3121
    /* NaN are not treated the same way IEEE 754 does */
3122
    if (unlikely(float32_is_quiet_nan(u.f)))
3123
        return 0;
3124

    
3125
    return float32_to_uint32(u.f, &env->vec_status);
3126
}
3127

    
3128
static inline uint32_t efsctsiz(uint32_t val)
3129
{
3130
    CPU_FloatU u;
3131

    
3132
    u.l = val;
3133
    /* NaN are not treated the same way IEEE 754 does */
3134
    if (unlikely(float32_is_quiet_nan(u.f)))
3135
        return 0;
3136

    
3137
    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
3138
}
3139

    
3140
static inline uint32_t efsctuiz(uint32_t val)
3141
{
3142
    CPU_FloatU u;
3143

    
3144
    u.l = val;
3145
    /* NaN are not treated the same way IEEE 754 does */
3146
    if (unlikely(float32_is_quiet_nan(u.f)))
3147
        return 0;
3148

    
3149
    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
3150
}
3151

    
3152
static inline uint32_t efscfsf(uint32_t val)
3153
{
3154
    CPU_FloatU u;
3155
    float32 tmp;
3156

    
3157
    u.f = int32_to_float32(val, &env->vec_status);
3158
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
3159
    u.f = float32_div(u.f, tmp, &env->vec_status);
3160

    
3161
    return u.l;
3162
}
3163

    
3164
static inline uint32_t efscfuf(uint32_t val)
3165
{
3166
    CPU_FloatU u;
3167
    float32 tmp;
3168

    
3169
    u.f = uint32_to_float32(val, &env->vec_status);
3170
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3171
    u.f = float32_div(u.f, tmp, &env->vec_status);
3172

    
3173
    return u.l;
3174
}
3175

    
3176
static inline uint32_t efsctsf(uint32_t val)
3177
{
3178
    CPU_FloatU u;
3179
    float32 tmp;
3180

    
3181
    u.l = val;
3182
    /* NaN are not treated the same way IEEE 754 does */
3183
    if (unlikely(float32_is_quiet_nan(u.f)))
3184
        return 0;
3185
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3186
    u.f = float32_mul(u.f, tmp, &env->vec_status);
3187

    
3188
    return float32_to_int32(u.f, &env->vec_status);
3189
}
3190

    
3191
static inline uint32_t efsctuf(uint32_t val)
3192
{
3193
    CPU_FloatU u;
3194
    float32 tmp;
3195

    
3196
    u.l = val;
3197
    /* NaN are not treated the same way IEEE 754 does */
3198
    if (unlikely(float32_is_quiet_nan(u.f)))
3199
        return 0;
3200
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3201
    u.f = float32_mul(u.f, tmp, &env->vec_status);
3202

    
3203
    return float32_to_uint32(u.f, &env->vec_status);
3204
}
3205

    
3206
/* Expose each single-element SPE conversion as a TCG helper.  */
#define HELPER_SPE_SINGLE_CONV(name)                                          \
uint32_t helper_e##name (uint32_t val)                                        \
{                                                                             \
    return e##name(val);                                                      \
}
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
3231

    
3232
/* Expose each SPE conversion as a vector helper applying the operation to
 * both 32-bit halves of a 64-bit register.  */
#define HELPER_SPE_VECTOR_CONV(name)                                          \
uint64_t helper_ev##name (uint64_t val)                                       \
{                                                                             \
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
            (uint64_t)e##name(val);                                           \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
3258

    
3259
/* Single-precision floating-point arithmetic */
3260
static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
3261
{
3262
    CPU_FloatU u1, u2;
3263
    u1.l = op1;
3264
    u2.l = op2;
3265
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3266
    return u1.l;
3267
}
3268

    
3269
static inline uint32_t efssub(uint32_t op1, uint32_t op2)
3270
{
3271
    CPU_FloatU u1, u2;
3272
    u1.l = op1;
3273
    u2.l = op2;
3274
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3275
    return u1.l;
3276
}
3277

    
3278
static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
3279
{
3280
    CPU_FloatU u1, u2;
3281
    u1.l = op1;
3282
    u2.l = op2;
3283
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3284
    return u1.l;
3285
}
3286

    
3287
static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
3288
{
3289
    CPU_FloatU u1, u2;
3290
    u1.l = op1;
3291
    u2.l = op2;
3292
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3293
    return u1.l;
3294
}
3295

    
3296
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
3297
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
3298
{                                                                             \
3299
    return e##name(op1, op2);                                                 \
3300
}
3301
/* efsadd */
3302
HELPER_SPE_SINGLE_ARITH(fsadd);
3303
/* efssub */
3304
HELPER_SPE_SINGLE_ARITH(fssub);
3305
/* efsmul */
3306
HELPER_SPE_SINGLE_ARITH(fsmul);
3307
/* efsdiv */
3308
HELPER_SPE_SINGLE_ARITH(fsdiv);
3309

    
3310
/* Expose each SPE arithmetic operation as a vector helper applying it to
 * both 32-bit halves of the 64-bit operands.  */
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
            (uint64_t)e##name(op1, op2);                                      \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
3324

    
3325
/* Single-precision floating-point comparisons */
3326
static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
3327
{
3328
    CPU_FloatU u1, u2;
3329
    u1.l = op1;
3330
    u2.l = op2;
3331
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3332
}
3333

    
3334
static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
3335
{
3336
    CPU_FloatU u1, u2;
3337
    u1.l = op1;
3338
    u2.l = op2;
3339
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
3340
}
3341

    
3342
static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
3343
{
3344
    CPU_FloatU u1, u2;
3345
    u1.l = op1;
3346
    u2.l = op2;
3347
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3348
}
3349

    
3350
static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
3351
{
3352
    /* XXX: TODO: test special values (NaN, infinites, ...) */
3353
    return efststlt(op1, op2);
3354
}
3355

    
3356
static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
3357
{
3358
    /* XXX: TODO: test special values (NaN, infinites, ...) */
3359
    return efststgt(op1, op2);
3360
}
3361

    
3362
static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
3363
{
3364
    /* XXX: TODO: test special values (NaN, infinites, ...) */
3365
    return efststeq(op1, op2);
3366
}
3367

    
3368
#define HELPER_SINGLE_SPE_CMP(name)                                           \
3369
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
3370
{                                                                             \
3371
    return e##name(op1, op2) << 2;                                            \
3372
}
3373
/* efststlt */
3374
HELPER_SINGLE_SPE_CMP(fststlt);
3375
/* efststgt */
3376
HELPER_SINGLE_SPE_CMP(fststgt);
3377
/* efststeq */
3378
HELPER_SINGLE_SPE_CMP(fststeq);
3379
/* efscmplt */
3380
HELPER_SINGLE_SPE_CMP(fscmplt);
3381
/* efscmpgt */
3382
HELPER_SINGLE_SPE_CMP(fscmpgt);
3383
/* efscmpeq */
3384
HELPER_SINGLE_SPE_CMP(fscmpeq);
3385

    
3386
static inline uint32_t evcmp_merge(int t0, int t1)
3387
{
3388
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
3389
}
3390

    
3391
/* Expand to helper_ev<name>: applies the scalar comparison e<name> to the
 * high and low 32-bit elements of each operand and merges the two results
 * via evcmp_merge(). */
#define HELPER_VECTOR_SPE_CMP(name)                                           \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
3408

    
3409
/* Double-precision floating-point conversion */
3410
/* efdcfsi: convert a signed 32-bit integer to double precision. */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU res;

    res.d = int32_to_float64(val, &env->vec_status);
    return res.ll;
}
3418

    
3419
/* efdcfsid: convert a signed 64-bit integer to double precision. */
uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU res;

    res.d = int64_to_float64(val, &env->vec_status);
    return res.ll;
}
3427

    
3428
/* efdcfui: convert an unsigned 32-bit integer to double precision. */
uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU res;

    res.d = uint32_to_float64(val, &env->vec_status);
    return res.ll;
}
3436

    
3437
/* efdcfuid: convert an unsigned 64-bit integer to double precision. */
uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU res;

    res.d = uint64_to_float64(val, &env->vec_status);
    return res.ll;
}
3445

    
3446
/* efdctsi: convert double to signed 32-bit int (current rounding mode). */
uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* NaN are not treated the same way IEEE 754 does: return 0 instead */
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_int32(in.d, &env->vec_status);
}
3458

    
3459
/* efdctui: convert double to unsigned 32-bit int (current rounding mode). */
uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* NaN are not treated the same way IEEE 754 does: return 0 instead */
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_uint32(in.d, &env->vec_status);
}
3471

    
3472
/* efdctsiz: convert double to signed 32-bit int, rounding toward zero. */
uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* NaN are not treated the same way IEEE 754 does: return 0 instead */
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_int32_round_to_zero(in.d, &env->vec_status);
}
3484

    
3485
/* efdctsidz: convert double to signed 64-bit int, rounding toward zero. */
uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* NaN are not treated the same way IEEE 754 does: return 0 instead */
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_int64_round_to_zero(in.d, &env->vec_status);
}
3497

    
3498
/* efdctuiz: convert double to unsigned 32-bit int, rounding toward zero. */
uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* NaN are not treated the same way IEEE 754 does: return 0 instead */
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_uint32_round_to_zero(in.d, &env->vec_status);
}
3510

    
3511
/* efdctuidz: convert double to unsigned 64-bit int, rounding toward zero. */
uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* NaN are not treated the same way IEEE 754 does: return 0 instead */
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_uint64_round_to_zero(in.d, &env->vec_status);
}
3523

    
3524
/* efdcfsf: convert a signed 32-bit fractional value to double precision
 * by dividing the integer conversion by 2^32. */
uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU res;
    float64 scale;

    res.d = int32_to_float64(val, &env->vec_status);
    scale = int64_to_float64(1ULL << 32, &env->vec_status);
    res.d = float64_div(res.d, scale, &env->vec_status);
    return res.ll;
}
3535

    
3536
/* efdcfuf: convert an unsigned 32-bit fractional value to double precision
 * by dividing the integer conversion by 2^32. */
uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU res;
    float64 scale;

    res.d = uint32_to_float64(val, &env->vec_status);
    scale = int64_to_float64(1ULL << 32, &env->vec_status);
    res.d = float64_div(res.d, scale, &env->vec_status);
    return res.ll;
}
3547

    
3548
/* efdctsf: convert double precision to a signed 32-bit fractional value
 * by scaling with 2^32 before the integer conversion. */
uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU in;
    float64 scale;

    in.ll = val;
    /* NaN are not treated the same way IEEE 754 does: return 0 instead */
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    scale = uint64_to_float64(1ULL << 32, &env->vec_status);
    in.d = float64_mul(in.d, scale, &env->vec_status);
    return float64_to_int32(in.d, &env->vec_status);
}
3563

    
3564
/* efdctuf: convert double precision to an unsigned 32-bit fractional value
 * by scaling with 2^32 before the integer conversion. */
uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU in;
    float64 scale;

    in.ll = val;
    /* NaN are not treated the same way IEEE 754 does: return 0 instead */
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    scale = uint64_to_float64(1ULL << 32, &env->vec_status);
    in.d = float64_mul(in.d, scale, &env->vec_status);
    return float64_to_uint32(in.d, &env->vec_status);
}
3579

    
3580
/* efscfd: narrow a double-precision value to single precision. */
uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU in;
    CPU_FloatU out;

    in.ll = val;
    out.f = float64_to_float32(in.d, &env->vec_status);
    return out.l;
}
3590

    
3591
/* efdcfs: widen a single-precision value to double precision. */
uint64_t helper_efdcfs (uint32_t val)
{
    CPU_FloatU in;
    CPU_DoubleU out;

    in.l = val;
    out.d = float32_to_float64(in.f, &env->vec_status);
    return out.ll;
}
3601

    
3602
/* Double precision fixed-point arithmetic */
3603
/* efdadd: double-precision addition. */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_add(a.d, b.d, &env->vec_status);
    return a.ll;
}
3611

    
3612
/* efdsub: double-precision subtraction (op1 - op2). */
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_sub(a.d, b.d, &env->vec_status);
    return a.ll;
}
3620

    
3621
/* efdmul: double-precision multiplication. */
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_mul(a.d, b.d, &env->vec_status);
    return a.ll;
}
3629

    
3630
/* efddiv: double-precision division (op1 / op2). */
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_div(a.d, b.d, &env->vec_status);
    return a.ll;
}
3638

    
3639
/* Double precision floating point helpers */
3640
/* efdtstlt: non-trapping double-precision "less than" test; 4 on match. */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    if (float64_lt(a.d, b.d, &env->vec_status)) {
        return 4;
    }
    return 0;
}
3647

    
3648
/* efdtstgt: non-trapping double-precision "greater than" test.
 * Returns 4 when op1 > op2, implemented as the negation of (op1 <= op2). */
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    if (float64_le(a.d, b.d, &env->vec_status)) {
        return 0;
    }
    return 4;
}
3655

    
3656
/* efdtsteq: non-trapping double-precision "equal" test; 4 on match. */
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    if (float64_eq(a.d, b.d, &env->vec_status)) {
        return 4;
    }
    return 0;
}
3663

    
3664
/* efdcmplt: double-precision compare "less than"; currently identical to
 * the non-trapping test variant. */
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(op1, op2);
}
3669

    
3670
/* efdcmpgt: double-precision compare "greater than"; currently identical to
 * the non-trapping test variant. */
uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(op1, op2);
}
3675

    
3676
/* efdcmpeq: double-precision compare "equal"; currently identical to
 * the non-trapping test variant. */
uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(op1, op2);
}
3681

    
3682
/*****************************************************************************/
3683
/* Softmmu support */
3684
#if !defined (CONFIG_USER_ONLY)
3685

    
3686
#define MMUSUFFIX _mmu
3687

    
3688
#define SHIFT 0
3689
#include "softmmu_template.h"
3690

    
3691
#define SHIFT 1
3692
#include "softmmu_template.h"
3693

    
3694
#define SHIFT 2
3695
#include "softmmu_template.h"
3696

    
3697
#define SHIFT 3
3698
#include "softmmu_template.h"
3699

    
3700
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Raises the exception recorded by the fault handler; this call does
           not return, so saved_env is intentionally not restored here. */
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
3731

    
3732
/* Segment registers load and store */
3733
/* Read segment register sr_num; 64-bit MMU models go through the
 * ppc_load_sr() accessor instead of the raw array. */
target_ulong helper_load_sr (target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64)
        return ppc_load_sr(env, sr_num);
#endif
    return env->sr[sr_num];
}
3741

    
3742
/* Write segment register sr_num through the MMU accessor. */
void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}
3746

    
3747
/* SLB management */
3748
#if defined(TARGET_PPC64)
3749
/* Read SLB entry slb_nr (slbmfee/slbmfev support). */
target_ulong helper_load_slb (target_ulong slb_nr)
{
    return ppc_load_slb(env, slb_nr);
}
3753

    
3754
/* Write an SLB entry from the rb/rs register pair (slbmte). */
void helper_store_slb (target_ulong rb, target_ulong rs)
{
    ppc_store_slb(env, rb, rs);
}
3758

    
3759
/* slbia: invalidate all SLB entries. */
void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}
3763

    
3764
/* slbie: invalidate the SLB entry matching addr. */
void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
3768

    
3769
#endif /* defined(TARGET_PPC64) */
3770

    
3771
/* TLB management */
3772
/* tlbia: invalidate the whole TLB. */
void helper_tlbia (void)
{
    ppc_tlb_invalidate_all(env);
}
3776

    
3777
/* tlbie: invalidate the TLB entry covering addr. */
void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
3781

    
3782
/* Software driven TLBs management */
3783
/* PowerPC 602/603 software TLB load instructions helpers */
3784
/* Load one entry of the 602/603 software-managed TLB from the SPRs filled
 * in by the hardware miss handler (RPA plus ICMP/IMISS or DCMP/DMISS,
 * selected by is_code). */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    /* The replacement way is taken from SRR1 bit 17 */
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3806

    
3807
/* tlbld: load a data TLB entry (is_code = 0). */
void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}
3811

    
3812
/* tlbli: load an instruction TLB entry (is_code = 1). */
void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}
3816

    
3817
/* PowerPC 74xx software TLB load instructions helpers */
3818
/* Load one entry of the 74xx software-managed TLB from the PTELO/PTEHI/
 * TLBMISS SPRs; TLBMISS holds the EPN in its upper bits and the way in
 * its low two bits. */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3835

    
3836
/* tlbld (74xx): load a data TLB entry (is_code = 0). */
void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}
3840

    
3841
/* tlbli (74xx): load an instruction TLB entry (is_code = 1). */
void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}
3845

    
3846
static inline target_ulong booke_tlb_to_page_size(int size)
3847
{
3848
    return 1024 << (2 * size);
3849
}
3850

    
3851
/* Inverse of booke_tlb_to_page_size(): map a page size in bytes back to
 * its BookE TLB size code (0x0..0xA, up to 0xF on 64-bit targets).
 * Returns -1 for sizes that have no encoding. */
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}
3913

    
3914
/* Helpers for 4xx TLB management */
3915
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */
3916

    
3917
#define PPC4XX_TLBHI_V              0x00000040
3918
#define PPC4XX_TLBHI_E              0x00000020
3919
#define PPC4XX_TLBHI_SIZE_MIN       0
3920
#define PPC4XX_TLBHI_SIZE_MAX       7
3921
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
3922
#define PPC4XX_TLBHI_SIZE_SHIFT     7
3923
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007
3924

    
3925
#define PPC4XX_TLBLO_EX             0x00000200
3926
#define PPC4XX_TLBLO_WR             0x00000100
3927
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
3928
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00
3929

    
3930
/* tlbre (40x, TLBHI word): rebuild the EPN/size/valid word of TLB entry
 * 'entry' and mirror the entry's PID into the 40x PID SPR. */
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong hi;
    int size_code;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    hi = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        hi |= PPC4XX_TLBHI_V;
    }
    /* Fall back to the default size code if the stored size is not
     * representable in the 4xx 3-bit field. */
    size_code = booke_page_size_to_tlb(tlb->size);
    if (size_code < PPC4XX_TLBHI_SIZE_MIN || size_code > PPC4XX_TLBHI_SIZE_MAX) {
        size_code = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    hi |= size_code << PPC4XX_TLBHI_SIZE_SHIFT;
    env->spr[SPR_40x_PID] = tlb->PID;
    return hi;
}
3950

    
3951
/* tlbre (40x, TLBLO word): rebuild the RPN/EX/WR word of TLB entry
 * 'entry' from the stored RPN and protection bits. */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong lo;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    lo = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        lo |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        lo |= PPC4XX_TLBLO_WR;
    }
    return lo;
}
3967

    
3968
/* tlbwe (40x, TLBHI word): update the EPN/size/valid half of TLB entry
 * 'entry' from 'val', flushing the QEMU TLB pages covered by both the old
 * and the new mapping. */
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    /* Align the EPN to the (power-of-two) page size just computed */
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
}
4026

    
4027
/* tlbwe (40x, TLBLO word): update the RPN/attribute/protection half of
 * TLB entry 'entry' from 'val'. */
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    /* Read access is always granted; write/execute follow the WR/EX bits */
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
4052

    
4053
/* tlbsx (40x): search the TLB for address under the current 40x PID. */
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
4057

    
4058
/* PowerPC 440 TLB management */
4059
/* tlbwe (440): write word 0/1/2 of TLB entry 'entry' from 'value',
 * flushing the QEMU TLB whenever an existing valid mapping changes. */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        /* Word 0: EPN, size code, attribute bit 0 and valid bit */
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        /* The PID is taken from MMUCR, not from the written value */
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        /* Word 1: real page number */
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        /* Word 2: storage attributes plus two sets of R/W/X permission
         * bits; one set is stored shifted left by 4 in tlb->prot (the
         * inverse mapping is in helper_440_tlbre, word 2) */
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}
4120

    
4121
/* tlbre (440): read back word 0/1/2 of TLB entry 'entry'.  Reading word 0
 * also copies the entry's PID into the low byte of MMUCR. */
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        /* Clamp unencodable sizes to code 1 */
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        /* Reconstruct the permission bits written by helper_440_tlbwe */
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}
4166

    
4167
/* tlbsx (440): search the TLB for address under the PID held in the low
 * byte of MMUCR. */
target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
4171

    
4172
#endif /* !CONFIG_USER_ONLY */