/*
 * Source: repository browser view of
 * root / target-ppc / op_helper.c @ 96912e39
 */
/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <string.h>
20
#include "exec.h"
21
#include "host-utils.h"
22
#include "helper.h"
23

    
24
#include "helper_regs.h"
25

    
26
//#define DEBUG_OP
27
//#define DEBUG_EXCEPTIONS
28
//#define DEBUG_SOFTWARE_TLB
29

    
30
#ifdef DEBUG_SOFTWARE_TLB
31
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
32
#else
33
#  define LOG_SWTLB(...) do { } while (0)
34
#endif
35

    
36

    
37
/*****************************************************************************/
38
/* Exceptions processing helpers */
39

    
40
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
41
{
42
#if 0
43
    printf("Raise exception %3x code : %d\n", exception, error_code);
44
#endif
45
    env->exception_index = exception;
46
    env->error_code = error_code;
47
    cpu_loop_exit();
48
}
49

    
50
void helper_raise_exception (uint32_t exception)
51
{
52
    helper_raise_exception_err(exception, 0);
53
}
54

    
55
/*****************************************************************************/
56
/* SPR accesses */
57
/* Debug helper: log an SPR read through qemu_log. */
void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

/* Debug helper: log an SPR write through qemu_log. */
void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}
68

    
69
/* Time base (TBL/TBU) and alternate time base (ATBL/ATBU) read helpers. */
target_ulong helper_load_tbl (void)
{
    return (target_ulong)cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return (target_ulong)cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

/* PowerPC 601 real-time clock (RTCL/RTCU) read helpers. */
target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
98

    
99
#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
/* Write the Address Space Register (64-bit implementations only). */
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

/* Write SDR1 (storage description register). */
void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

/* Time base and alternate time base write helpers (lower/upper 32 bits). */
void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

/* PowerPC 601 real-time clock write helpers. */
void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

/* Decrementer read/write. */
target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

/* Write HID0 on the PowerPC 601.  Bit 3 (0x08) selects little-endian
 * mode: when it changes, the MSR_LE bit in hflags/hflags_nmsr is
 * regenerated so translated code picks up the new endianness.
 */
void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
                 val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

/* Write a PowerPC 403 protection bound register; the TLB is flushed
 * when the value actually changes.
 */
void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

/* 40x programmable interval timer read/write. */
target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

/* 40x debug control register 0. */
void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

/* 40x storage little-endian register. */
void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

/* BookE timer control / timer status registers. */
void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

/* Instruction/data BAT (block address translation) register writes. */
void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

/* 601 unified BATs are stored through the IBAT entry points. */
void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif
238

    
239
/*****************************************************************************/
240
/* Memory load and stores */
241

    
242
/* Add a displacement to an effective address, truncating the result to
 * 32 bits when the CPU is not in 64-bit (SF) mode.
 */
static inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf) {
        return (uint32_t)(addr + arg);
    }
#endif
    return addr + arg;
}
251

    
252
/* lmw: load consecutive words from addr into GPRs reg..31,
 * byte-swapping each word in little-endian mode.
 */
void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++, addr = addr_add(addr, 4)) {
        uint32_t val = ldl(addr);

        env->gpr[reg] = msr_le ? bswap32(val) : val;
    }
}
262

    
263
/* stmw: store GPRs reg..31 as consecutive words at addr,
 * byte-swapping each word in little-endian mode.
 */
void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++, addr = addr_add(addr, 4)) {
        uint32_t val = (uint32_t)env->gpr[reg];

        stl(addr, msr_le ? bswap32(val) : val);
    }
}
273

    
274
/* lsw: load nb bytes starting at addr into successive GPRs (4 bytes per
 * register, wrapping from r31 to r0).  A trailing partial word is
 * left-justified and zero-padded in the last register.
 */
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    while (nb > 3) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    if (unlikely(nb > 0)) {
        int sh = 24;

        env->gpr[reg] = 0;
        while (nb > 0) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
            sh -= 8;
            nb--;
        }
    }
}
290
/* PPC32 specification says we must generate an exception if
291
 * rA is in the range of registers to be loaded.
292
 * In an other hand, IBM says this is valid, but rA won't be loaded.
293
 * For now, I'll follow the spec...
294
 */
295
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
296
{
297
    if (likely(xer_bc != 0)) {
298
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
299
                     (reg < rb && (reg + xer_bc) > rb))) {
300
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
301
                                       POWERPC_EXCP_INVAL |
302
                                       POWERPC_EXCP_INVAL_LSWX);
303
        } else {
304
            helper_lsw(addr, xer_bc, reg);
305
        }
306
    }
307
}
308

    
309
/* stsw: store nb bytes from successive GPRs starting at reg (4 bytes
 * per register, wrapping from r31 to r0); a trailing partial word is
 * taken from the most-significant bytes of the last register.
 */
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    while (nb > 3) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    if (unlikely(nb > 0)) {
        int sh = 24;

        while (nb > 0) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
            sh -= 8;
            nb--;
        }
    }
}
324

    
325
/* Zero one data cache line of the given size, and drop any lwarx/ldarx
 * reservation that targets it.
 */
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        stl(addr + i, 0);
    }
    if (env->reserve_addr == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }
}

void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

/* dcbz on the 970: HID5 bits select a forced 32-byte line size. */
void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        do_dcbz(addr, 32);
    } else {
        do_dcbz(addr, env->dcache_line_size);
    }
}
348

    
349
/* icbi: instruction cache block invalidate.
 * NOTE(review): the address is aligned using dcache_line_size but the
 * invalidated range uses icache_line_size — confirm this asymmetry is
 * intentional for CPUs where the two sizes differ.
 */
void helper_icbi(target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}
360

    
361
// XXX: to be tested
362
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
363
{
364
    int i, c, d;
365
    d = 24;
366
    for (i = 0; i < xer_bc; i++) {
367
        c = ldub(addr);
368
        addr = addr_add(addr, 1);
369
        /* ra (if not 0) and rb are never modified */
370
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
371
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
372
        }
373
        if (unlikely(c == xer_cmp))
374
            break;
375
        if (likely(d != 0)) {
376
            d -= 8;
377
        } else {
378
            d = 24;
379
            reg++;
380
            reg = reg & 0x1F;
381
        }
382
    }
383
    return i;
384
}
385

    
386
/*****************************************************************************/
387
/* Fixed point operations helpers */
388
#if defined(TARGET_PPC64)
389

    
390
/* multiply high word */
391
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
392
{
393
    uint64_t tl, th;
394

    
395
    muls64(&tl, &th, arg1, arg2);
396
    return th;
397
}
398

    
399
/* multiply high word unsigned */
400
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
401
{
402
    uint64_t tl, th;
403

    
404
    mulu64(&tl, &th, arg1, arg2);
405
    return th;
406
}
407

    
408
/* mulldo: signed 64x64->64 multiply, setting XER[OV] (and XER[SO]) when
 * the full signed product does not fit in 64 bits.
 */
uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    /* Take the high part as uint64_t directly: the original cast of an
     * int64_t* to uint64_t* violated strict aliasing.
     */
    muls64(&tl, &th, arg1, arg2);
    /* Overflow iff the high 64 bits are not the sign extension of the
     * low 64 bits.  (The previous check, th in {0, -1}, wrongly
     * accepted e.g. th == 0 with tl's sign bit set.)
     */
    if (likely((int64_t)th == ((int64_t)tl >> 63))) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
422
#endif
423

    
424
/* cntlzw: count leading zeros of the low 32 bits. */
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}
428

    
429
#if defined(TARGET_PPC64)
430
/* cntlzd: count leading zeros of the 64-bit value. */
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
434
#endif
435

    
436
/* shift right arithmetic helper */
437
target_ulong helper_sraw (target_ulong value, target_ulong shift)
438
{
439
    int32_t ret;
440

    
441
    if (likely(!(shift & 0x20))) {
442
        if (likely((uint32_t)shift != 0)) {
443
            shift &= 0x1f;
444
            ret = (int32_t)value >> shift;
445
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
446
                env->xer &= ~(1 << XER_CA);
447
            } else {
448
                env->xer |= (1 << XER_CA);
449
            }
450
        } else {
451
            ret = (int32_t)value;
452
            env->xer &= ~(1 << XER_CA);
453
        }
454
    } else {
455
        ret = (int32_t)value >> 31;
456
        if (ret) {
457
            env->xer |= (1 << XER_CA);
458
        } else {
459
            env->xer &= ~(1 << XER_CA);
460
        }
461
    }
462
    return (target_long)ret;
463
}
464

    
465
#if defined(TARGET_PPC64)
466
/* srad: 64-bit arithmetic shift right.  XER[CA] is set when the source
 * is negative and one-bits are shifted out; shift amounts with bit 6
 * set (64..127) produce all sign bits.
 */
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* BUG FIX: the mask must be built in 64-bit arithmetic.
             * "1 << shift" is an int shift, which is undefined (and
             * produces a wrong mask) for shift >= 31, so carry was
             * miscomputed for large shift counts.
             */
            if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            /* Shift by zero: value unchanged, carry cleared. */
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        /* Shift >= 64: result is the sign bit replicated. */
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
493
#endif
494

    
495
/* popcntb: population count within each byte of the low 32 bits,
 * computed with a parallel (SWAR) reduction: pairs, then nibbles,
 * then bytes.
 */
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}
502

    
503
#if defined(TARGET_PPC64)
504
/* 64-bit variant of popcntb: per-byte population counts via the same
 * parallel (SWAR) reduction.
 */
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
511
#endif
512

    
513
/*****************************************************************************/
514
/* Floating point operations helpers */
515
/* Widen a raw 32-bit single-precision pattern to a 64-bit double,
 * using the CPU's softfloat status for exception accumulation.
 */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

/* Narrow a raw 64-bit double-precision pattern to a 32-bit single,
 * using the CPU's softfloat status for exception accumulation.
 */
uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}
532

    
533
/* Return non-zero when the biased exponent field of d is all zero.
 * This is true for denormals and also for zeros; callers test for
 * zero first (see helper_compute_fprf).
 */
static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}
541

    
542
/* Compute the 5-bit FPRF class/sign code for a 64-bit FP value and,
 * when set_fprf is non-zero, store it into FPSCR[FPRF].  Returns only
 * the low 4 bits (FPCC) so the caller can update CR1.
 * NOTE(review): the outer test relies on float64_is_quiet_nan()
 * matching any NaN in this tree's softfloat — confirm the naming.
 */
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_quiet_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            /* Fold in the sign bit of the classification. */
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
593

    
594
/* Floating-point invalid operations exception */
/* Record one invalid-operation condition (POWERPC_EXCP_FP_VX*) in the
 * FPSCR and, when invalid-operation exceptions are enabled (VE) and
 * MSR[FE0|FE1] is non-zero, raise a program exception.  For the
 * arithmetic cases with VE clear, returns the default quiet-NaN result
 * pattern; otherwise returns 0.
 */
static inline uint64_t fload_invalid_op_excp(int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred (raised later by
             * helper_float_check_status) */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
675

    
676
/* Record a divide-by-zero exception (ZX); raise a program exception
 * immediately when ZE is set and MSR[FE0|FE1] is non-zero.
 */
static inline void float_zero_divide_excp(void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}
691

    
692
/* Record an overflow exception (OX).  When OE is set, prepare a
 * deferred program exception (delivered by helper_float_check_status
 * after the target FPR is written); otherwise also set XX/FI.
 */
static inline void float_overflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}
709

    
710
/* Record an underflow exception (UX).  When UE is set, prepare a
 * deferred program exception (delivered by helper_float_check_status
 * after the target FPR is written).
 */
static inline void float_underflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}
724

    
725
/* Record an inexact exception (XX).  When XE is set, prepare a
 * deferred program exception (delivered by helper_float_check_status
 * after the target FPR is written).
 */
static inline void float_inexact_excp(void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
738

    
739
/* Propagate the FPSCR[RN] rounding-mode field into the softfloat
 * status structure.
 */
static inline void fpscr_set_rounding_mode(void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinite */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinite */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
765

    
766
/* Clear one FPSCR bit; when a rounding-mode bit (RN/RN1) actually
 * changed from 1 to 0, re-derive the softfloat rounding mode.
 */
void helper_fpscr_clrbit (uint32_t bit)
{
    int was_set = (env->fpscr >> bit) & 1;

    env->fpscr &= ~(1 << bit);
    if (was_set && (bit == FPSCR_RN1 || bit == FPSCR_RN)) {
        fpscr_set_rounding_mode();
    }
}
783

    
784
/* Set one FPSCR bit.  When the bit was previously clear this may have
 * side effects: exception summary bits (FX/VX/FEX) are updated and,
 * when the matching enable bit is set, a program exception is prepared
 * (raise_excp only records exception_index/error_code; delivery is
 * deferred to helper_float_check_status so the target FPR/CR1 can be
 * updated first).
 */
void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            /* NOTE(review): no break here, so FPSCR_VX falls through
             * into the FPSCR_OX handling below — confirm this is
             * intentional. */
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            /* Any invalid-operation sub-flag also sets the VX summary. */
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                /* Collect every pending invalid-operation cause into
                 * the error code. */
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            /* Rounding-mode field changed: propagate to softfloat. */
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
898

    
899
/* mtfsf: replace the FPSCR nibbles selected by mask with the low 32
 * bits of arg, then recompute the VX and FEX summary bits and the
 * rounding mode.  The FEX/VX bits themselves (0x60000000) are never
 * copied from the source value.
 */
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = ((uint32_t)arg & ~0x60000000) | (prev & 0x60000000);
    for (i = 0; i < 8; i++) {
        uint32_t nibble = 0xF << (4 * i);

        if (mask & (1 << i)) {
            env->fpscr = (env->fpscr & ~nibble) | (new & nibble);
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode();
}
932

    
933
/* Deliver a floating-point exception that an earlier helper recorded
 * (deferred until after the target FPR update), and — when softfloat
 * is in use — translate accumulated softfloat status flags into FPSCR
 * updates, reporting only the highest-priority condition.
 */
void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        /* Only one condition is handled, in priority order. */
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}
962

    
963
#ifdef CONFIG_SOFTFLOAT
964
/* Clear the accumulated softfloat exception flags. */
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
968
#endif
969

    
970
/* fadd - fadd. */
971
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
972
{
973
    CPU_DoubleU farg1, farg2;
974

    
975
    farg1.ll = arg1;
976
    farg2.ll = arg2;
977

    
978
    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
979
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
980
        /* Magnitude subtraction of infinities */
981
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
982
    } else {
983
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
984
                     float64_is_signaling_nan(farg2.d))) {
985
            /* sNaN addition */
986
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
987
        }
988
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
989
    }
990

    
991
    return farg1.ll;
992
}
993

    
994
/* fsub - fsub. */
995
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
996
{
997
    CPU_DoubleU farg1, farg2;
998

    
999
    farg1.ll = arg1;
1000
    farg2.ll = arg2;
1001

    
1002
    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
1003
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
1004
        /* Magnitude subtraction of infinities */
1005
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1006
    } else {
1007
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
1008
                     float64_is_signaling_nan(farg2.d))) {
1009
            /* sNaN subtraction */
1010
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1011
        }
1012
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1013
    }
1014

    
1015
    return farg1.ll;
1016
}
1017

    
1018
/* fmul - fmul. */
1019
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
1020
{
1021
    CPU_DoubleU farg1, farg2;
1022

    
1023
    farg1.ll = arg1;
1024
    farg2.ll = arg2;
1025

    
1026
    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1027
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1028
        /* Multiplication of zero by infinity */
1029
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1030
    } else {
1031
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
1032
                     float64_is_signaling_nan(farg2.d))) {
1033
            /* sNaN multiplication */
1034
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1035
        }
1036
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1037
    }
1038

    
1039
    return farg1.ll;
1040
}
1041

    
1042
/* fdiv - fdiv. */
1043
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
1044
{
1045
    CPU_DoubleU farg1, farg2;
1046

    
1047
    farg1.ll = arg1;
1048
    farg2.ll = arg2;
1049

    
1050
    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
1051
        /* Division of infinity by infinity */
1052
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
1053
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
1054
        /* Division of zero by zero */
1055
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
1056
    } else {
1057
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
1058
                     float64_is_signaling_nan(farg2.d))) {
1059
            /* sNaN division */
1060
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1061
        }
1062
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1063
    }
1064

    
1065
    return farg1.ll;
1066
}
1067

    
1068
/* fabs */
1069
uint64_t helper_fabs (uint64_t arg)
1070
{
1071
    CPU_DoubleU farg;
1072

    
1073
    farg.ll = arg;
1074
    farg.d = float64_abs(farg.d);
1075
    return farg.ll;
1076
}
1077

    
1078
/* fnabs */
1079
uint64_t helper_fnabs (uint64_t arg)
1080
{
1081
    CPU_DoubleU farg;
1082

    
1083
    farg.ll = arg;
1084
    farg.d = float64_abs(farg.d);
1085
    farg.d = float64_chs(farg.d);
1086
    return farg.ll;
1087
}
1088

    
1089
/* fneg */
1090
uint64_t helper_fneg (uint64_t arg)
1091
{
1092
    CPU_DoubleU farg;
1093

    
1094
    farg.ll = arg;
1095
    farg.d = float64_chs(farg.d);
1096
    return farg.ll;
1097
}
1098

    
1099
/* fctiw - fctiw. */
1100
uint64_t helper_fctiw (uint64_t arg)
1101
{
1102
    CPU_DoubleU farg;
1103
    farg.ll = arg;
1104

    
1105
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1106
        /* sNaN conversion */
1107
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1108
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1109
        /* qNan / infinity conversion */
1110
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1111
    } else {
1112
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
1113
        /* XXX: higher bits are not supposed to be significant.
1114
         *     to make tests easier, return the same as a real PowerPC 750
1115
         */
1116
        farg.ll |= 0xFFF80000ULL << 32;
1117
    }
1118
    return farg.ll;
1119
}
1120

    
1121
/* fctiwz - fctiwz. */
1122
uint64_t helper_fctiwz (uint64_t arg)
1123
{
1124
    CPU_DoubleU farg;
1125
    farg.ll = arg;
1126

    
1127
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1128
        /* sNaN conversion */
1129
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1130
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1131
        /* qNan / infinity conversion */
1132
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1133
    } else {
1134
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1135
        /* XXX: higher bits are not supposed to be significant.
1136
         *     to make tests easier, return the same as a real PowerPC 750
1137
         */
1138
        farg.ll |= 0xFFF80000ULL << 32;
1139
    }
1140
    return farg.ll;
1141
}
1142

    
1143
#if defined(TARGET_PPC64)
1144
/* fcfid - fcfid. */
1145
uint64_t helper_fcfid (uint64_t arg)
1146
{
1147
    CPU_DoubleU farg;
1148
    farg.d = int64_to_float64(arg, &env->fp_status);
1149
    return farg.ll;
1150
}
1151

    
1152
/* fctid - fctid. */
1153
uint64_t helper_fctid (uint64_t arg)
1154
{
1155
    CPU_DoubleU farg;
1156
    farg.ll = arg;
1157

    
1158
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1159
        /* sNaN conversion */
1160
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1161
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1162
        /* qNan / infinity conversion */
1163
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1164
    } else {
1165
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
1166
    }
1167
    return farg.ll;
1168
}
1169

    
1170
/* fctidz - fctidz. */
1171
uint64_t helper_fctidz (uint64_t arg)
1172
{
1173
    CPU_DoubleU farg;
1174
    farg.ll = arg;
1175

    
1176
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1177
        /* sNaN conversion */
1178
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1179
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1180
        /* qNan / infinity conversion */
1181
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1182
    } else {
1183
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
1184
    }
1185
    return farg.ll;
1186
}
1187

    
1188
#endif
1189

    
1190
static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
1191
{
1192
    CPU_DoubleU farg;
1193
    farg.ll = arg;
1194

    
1195
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1196
        /* sNaN round */
1197
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1198
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1199
        /* qNan / infinity round */
1200
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1201
    } else {
1202
        set_float_rounding_mode(rounding_mode, &env->fp_status);
1203
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
1204
        /* Restore rounding mode from FPSCR */
1205
        fpscr_set_rounding_mode();
1206
    }
1207
    return farg.ll;
1208
}
1209

    
1210
uint64_t helper_frin (uint64_t arg)
1211
{
1212
    return do_fri(arg, float_round_nearest_even);
1213
}
1214

    
1215
uint64_t helper_friz (uint64_t arg)
1216
{
1217
    return do_fri(arg, float_round_to_zero);
1218
}
1219

    
1220
uint64_t helper_frip (uint64_t arg)
1221
{
1222
    return do_fri(arg, float_round_up);
1223
}
1224

    
1225
uint64_t helper_frim (uint64_t arg)
1226
{
1227
    return do_fri(arg, float_round_down);
1228
}
1229

    
1230
/* fmadd - fmadd. */
1231
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1232
{
1233
    CPU_DoubleU farg1, farg2, farg3;
1234

    
1235
    farg1.ll = arg1;
1236
    farg2.ll = arg2;
1237
    farg3.ll = arg3;
1238

    
1239
    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1240
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1241
        /* Multiplication of zero by infinity */
1242
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1243
    } else {
1244
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
1245
                     float64_is_signaling_nan(farg2.d) ||
1246
                     float64_is_signaling_nan(farg3.d))) {
1247
            /* sNaN operation */
1248
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1249
        }
1250
#ifdef FLOAT128
1251
        /* This is the way the PowerPC specification defines it */
1252
        float128 ft0_128, ft1_128;
1253

    
1254
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1255
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1256
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1257
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1258
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1259
            /* Magnitude subtraction of infinities */
1260
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1261
        } else {
1262
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1263
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1264
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1265
        }
1266
#else
1267
        /* This is OK on x86 hosts */
1268
        farg1.d = (farg1.d * farg2.d) + farg3.d;
1269
#endif
1270
    }
1271

    
1272
    return farg1.ll;
1273
}
1274

    
1275
/* fmsub - fmsub. */
1276
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1277
{
1278
    CPU_DoubleU farg1, farg2, farg3;
1279

    
1280
    farg1.ll = arg1;
1281
    farg2.ll = arg2;
1282
    farg3.ll = arg3;
1283

    
1284
    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1285
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1286
        /* Multiplication of zero by infinity */
1287
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1288
    } else {
1289
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
1290
                     float64_is_signaling_nan(farg2.d) ||
1291
                     float64_is_signaling_nan(farg3.d))) {
1292
            /* sNaN operation */
1293
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1294
        }
1295
#ifdef FLOAT128
1296
        /* This is the way the PowerPC specification defines it */
1297
        float128 ft0_128, ft1_128;
1298

    
1299
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1300
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1301
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1302
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1303
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1304
            /* Magnitude subtraction of infinities */
1305
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1306
        } else {
1307
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1308
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1309
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1310
        }
1311
#else
1312
        /* This is OK on x86 hosts */
1313
        farg1.d = (farg1.d * farg2.d) - farg3.d;
1314
#endif
1315
    }
1316
    return farg1.ll;
1317
}
1318

    
1319
/* fnmadd - fnmadd. */
1320
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1321
{
1322
    CPU_DoubleU farg1, farg2, farg3;
1323

    
1324
    farg1.ll = arg1;
1325
    farg2.ll = arg2;
1326
    farg3.ll = arg3;
1327

    
1328
    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1329
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1330
        /* Multiplication of zero by infinity */
1331
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1332
    } else {
1333
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
1334
                     float64_is_signaling_nan(farg2.d) ||
1335
                     float64_is_signaling_nan(farg3.d))) {
1336
            /* sNaN operation */
1337
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1338
        }
1339
#ifdef FLOAT128
1340
        /* This is the way the PowerPC specification defines it */
1341
        float128 ft0_128, ft1_128;
1342

    
1343
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1344
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1345
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1346
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1347
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1348
            /* Magnitude subtraction of infinities */
1349
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1350
        } else {
1351
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1352
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1353
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1354
        }
1355
#else
1356
        /* This is OK on x86 hosts */
1357
        farg1.d = (farg1.d * farg2.d) + farg3.d;
1358
#endif
1359
        if (likely(!float64_is_quiet_nan(farg1.d)))
1360
            farg1.d = float64_chs(farg1.d);
1361
    }
1362
    return farg1.ll;
1363
}
1364

    
1365
/* fnmsub - fnmsub. */
1366
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1367
{
1368
    CPU_DoubleU farg1, farg2, farg3;
1369

    
1370
    farg1.ll = arg1;
1371
    farg2.ll = arg2;
1372
    farg3.ll = arg3;
1373

    
1374
    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1375
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1376
        /* Multiplication of zero by infinity */
1377
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1378
    } else {
1379
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
1380
                     float64_is_signaling_nan(farg2.d) ||
1381
                     float64_is_signaling_nan(farg3.d))) {
1382
            /* sNaN operation */
1383
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1384
        }
1385
#ifdef FLOAT128
1386
        /* This is the way the PowerPC specification defines it */
1387
        float128 ft0_128, ft1_128;
1388

    
1389
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1390
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1391
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1392
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1393
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1394
            /* Magnitude subtraction of infinities */
1395
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1396
        } else {
1397
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1398
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1399
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1400
        }
1401
#else
1402
        /* This is OK on x86 hosts */
1403
        farg1.d = (farg1.d * farg2.d) - farg3.d;
1404
#endif
1405
        if (likely(!float64_is_quiet_nan(farg1.d)))
1406
            farg1.d = float64_chs(farg1.d);
1407
    }
1408
    return farg1.ll;
1409
}
1410

    
1411
/* frsp - frsp. */
1412
uint64_t helper_frsp (uint64_t arg)
1413
{
1414
    CPU_DoubleU farg;
1415
    float32 f32;
1416
    farg.ll = arg;
1417

    
1418
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1419
        /* sNaN square root */
1420
       fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1421
    }
1422
    f32 = float64_to_float32(farg.d, &env->fp_status);
1423
    farg.d = float32_to_float64(f32, &env->fp_status);
1424

    
1425
    return farg.ll;
1426
}
1427

    
1428
/* fsqrt - fsqrt. */
1429
uint64_t helper_fsqrt (uint64_t arg)
1430
{
1431
    CPU_DoubleU farg;
1432
    farg.ll = arg;
1433

    
1434
    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1435
        /* Square root of a negative nonzero number */
1436
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1437
    } else {
1438
        if (unlikely(float64_is_signaling_nan(farg.d))) {
1439
            /* sNaN square root */
1440
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1441
        }
1442
        farg.d = float64_sqrt(farg.d, &env->fp_status);
1443
    }
1444
    return farg.ll;
1445
}
1446

    
1447
/* fre - fre. */
1448
uint64_t helper_fre (uint64_t arg)
1449
{
1450
    CPU_DoubleU farg;
1451
    farg.ll = arg;
1452

    
1453
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1454
        /* sNaN reciprocal */
1455
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1456
    }
1457
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1458
    return farg.d;
1459
}
1460

    
1461
/* fres - fres. */
1462
uint64_t helper_fres (uint64_t arg)
1463
{
1464
    CPU_DoubleU farg;
1465
    float32 f32;
1466
    farg.ll = arg;
1467

    
1468
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1469
        /* sNaN reciprocal */
1470
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1471
    }
1472
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1473
    f32 = float64_to_float32(farg.d, &env->fp_status);
1474
    farg.d = float32_to_float64(f32, &env->fp_status);
1475

    
1476
    return farg.ll;
1477
}
1478

    
1479
/* frsqrte  - frsqrte. */
1480
uint64_t helper_frsqrte (uint64_t arg)
1481
{
1482
    CPU_DoubleU farg;
1483
    float32 f32;
1484
    farg.ll = arg;
1485

    
1486
    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1487
        /* Reciprocal square root of a negative nonzero number */
1488
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1489
    } else {
1490
        if (unlikely(float64_is_signaling_nan(farg.d))) {
1491
            /* sNaN reciprocal square root */
1492
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1493
        }
1494
        farg.d = float64_sqrt(farg.d, &env->fp_status);
1495
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1496
        f32 = float64_to_float32(farg.d, &env->fp_status);
1497
        farg.d = float32_to_float64(f32, &env->fp_status);
1498
    }
1499
    return farg.ll;
1500
}
1501

    
1502
/* fsel - fsel. */
1503
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1504
{
1505
    CPU_DoubleU farg1;
1506

    
1507
    farg1.ll = arg1;
1508

    
1509
    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_quiet_nan(farg1.d))
1510
        return arg2;
1511
    else
1512
        return arg3;
1513
}
1514

    
1515
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
1516
{
1517
    CPU_DoubleU farg1, farg2;
1518
    uint32_t ret = 0;
1519
    farg1.ll = arg1;
1520
    farg2.ll = arg2;
1521

    
1522
    if (unlikely(float64_is_quiet_nan(farg1.d) ||
1523
                 float64_is_quiet_nan(farg2.d))) {
1524
        ret = 0x01UL;
1525
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1526
        ret = 0x08UL;
1527
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1528
        ret = 0x04UL;
1529
    } else {
1530
        ret = 0x02UL;
1531
    }
1532

    
1533
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
1534
    env->fpscr |= ret << FPSCR_FPRF;
1535
    env->crf[crfD] = ret;
1536
    if (unlikely(ret == 0x01UL
1537
                 && (float64_is_signaling_nan(farg1.d) ||
1538
                     float64_is_signaling_nan(farg2.d)))) {
1539
        /* sNaN comparison */
1540
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1541
    }
1542
}
1543

    
1544
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
1545
{
1546
    CPU_DoubleU farg1, farg2;
1547
    uint32_t ret = 0;
1548
    farg1.ll = arg1;
1549
    farg2.ll = arg2;
1550

    
1551
    if (unlikely(float64_is_quiet_nan(farg1.d) ||
1552
                 float64_is_quiet_nan(farg2.d))) {
1553
        ret = 0x01UL;
1554
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1555
        ret = 0x08UL;
1556
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1557
        ret = 0x04UL;
1558
    } else {
1559
        ret = 0x02UL;
1560
    }
1561

    
1562
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
1563
    env->fpscr |= ret << FPSCR_FPRF;
1564
    env->crf[crfD] = ret;
1565
    if (unlikely (ret == 0x01UL)) {
1566
        if (float64_is_signaling_nan(farg1.d) ||
1567
            float64_is_signaling_nan(farg2.d)) {
1568
            /* sNaN comparison */
1569
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1570
                                  POWERPC_EXCP_FP_VXVC);
1571
        } else {
1572
            /* qNaN comparison */
1573
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1574
        }
1575
    }
1576
}
1577

    
1578
#if !defined (CONFIG_USER_ONLY)
1579
/* Write the MSR; if the write demands an exception, force a TB exit
 * and raise it. */
void helper_store_msr (target_ulong val)
{
    target_ulong excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(excp);
    }
}
1587

    
1588
/* Common return-from-interrupt body: restore PC from nip and MSR from
 * msr (masked by msrm); keep_msrh preserves the high MSR word in
 * 32-bit mode on 64-bit targets. */
static inline void do_rfi(target_ulong nip, target_ulong msr,
                          target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh) {
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
        }
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here, as rfi is always the last
     * insn of a TB */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
1616

    
1617
void helper_rfi (void)
1618
{
1619
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1620
           ~((target_ulong)0x783F0000), 1);
1621
}
1622

    
1623
#if defined(TARGET_PPC64)
1624
void helper_rfid (void)
1625
{
1626
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1627
           ~((target_ulong)0x783F0000), 0);
1628
}
1629

    
1630
void helper_hrfid (void)
1631
{
1632
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1633
           ~((target_ulong)0x783F0000), 0);
1634
}
1635
#endif
1636
#endif
1637

    
1638
/* tw: trap word — raise a trap exception when any selected 32-bit
 * comparison condition in flags (TO field) holds. */
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    int32_t sa = (int32_t)arg1, sb = (int32_t)arg2;
    uint32_t ua = (uint32_t)arg1, ub = (uint32_t)arg2;

    if (unlikely((sa < sb && (flags & 0x10)) ||
                 (sa > sb && (flags & 0x08)) ||
                 (sa == sb && (flags & 0x04)) ||
                 (ua < ub && (flags & 0x02)) ||
                 (ua > ub && (flags & 0x01)))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1648

    
1649
#if defined(TARGET_PPC64)
1650
/* td: trap doubleword — 64-bit counterpart of helper_tw. */
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    int64_t sa = (int64_t)arg1, sb = (int64_t)arg2;
    uint64_t ua = (uint64_t)arg1, ub = (uint64_t)arg2;

    if (unlikely((sa < sb && (flags & 0x10)) ||
                 (sa > sb && (flags & 0x08)) ||
                 (sa == sb && (flags & 0x04)) ||
                 (ua < ub && (flags & 0x02)) ||
                 (ua > ub && (flags & 0x01)))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1659
#endif
1660

    
1661
/*****************************************************************************/
1662
/* PowerPC 601 specific instructions (POWER bridge) */
1663

    
1664
target_ulong helper_clcs (uint32_t arg)
1665
{
1666
    switch (arg) {
1667
    case 0x0CUL:
1668
        /* Instruction cache line size */
1669
        return env->icache_line_size;
1670
        break;
1671
    case 0x0DUL:
1672
        /* Data cache line size */
1673
        return env->dcache_line_size;
1674
        break;
1675
    case 0x0EUL:
1676
        /* Minimum cache line size */
1677
        return (env->icache_line_size < env->dcache_line_size) ?
1678
                env->icache_line_size : env->dcache_line_size;
1679
        break;
1680
    case 0x0FUL:
1681
        /* Maximum cache line size */
1682
        return (env->icache_line_size > env->dcache_line_size) ?
1683
                env->icache_line_size : env->dcache_line_size;
1684
        break;
1685
    default:
1686
        /* Undefined */
1687
        return 0;
1688
        break;
1689
    }
1690
}
1691

    
1692
/* div (601/POWER): divide the 64-bit value arg1:MQ by arg2;
 * quotient is returned, remainder goes to MQ. */
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t dividend = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)dividend == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        /* Overflow or divide by zero: architected result */
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->spr[SPR_MQ] = dividend % arg2;
    return dividend / (int32_t)arg2;
}
1705

    
1706
/* divo (601/POWER): like helper_div but also updates XER[OV]/XER[SO]. */
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t dividend = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)dividend == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        /* Overflow or divide by zero */
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->spr[SPR_MQ] = dividend % arg2;
    dividend /= (int32_t)arg2;
    /* Flag overflow when the quotient does not fit in 32 bits */
    if ((int32_t)dividend != dividend) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    } else {
        env->xer &= ~(1 << XER_OV);
    }
    return dividend;
}
1726

    
1727
/* divs (601/POWER): 32-bit signed divide; remainder goes to MQ. */
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        /* Overflow or divide by zero: architected result */
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
    return (int32_t)arg1 / (int32_t)arg2;
}
1738

    
1739
/* divso (601/POWER): like helper_divs but also updates XER[OV]/XER[SO]. */
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        /* Overflow or divide by zero */
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->xer &= ~(1 << XER_OV);
    env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
    return (int32_t)arg1 / (int32_t)arg2;
}
1752

    
1753
#if !defined (CONFIG_USER_ONLY)
1754
/* rac (supervisor-only): translate an effective address to a real
 * address, temporarily ignoring the BATs. */
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int saved_nb_BATs;
    target_ulong ret = 0;

    /* Few instances of this are generated, as rac is supervisor only. */
    /* XXX: FIX THIS: Pretend we have no BAT */
    saved_nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
        ret = ctx.raddr;
    }
    env->nb_BATs = saved_nb_BATs;
    return ret;
}
1771

    
1772
void helper_rfsvc (void)
1773
{
1774
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1775
}
1776
#endif
1777

    
1778
/*****************************************************************************/
1779
/* 602 specific instructions */
1780
/* mfrom is the most crazy instruction ever seen, imho ! */
1781
/* Real implementation uses a ROM table. Do the same */
1782
/* Extremely decomposed:
1783
 *                      -arg / 256
1784
 * return 256 * log10(10           + 1.0) + 0.5
1785
 */
1786
#if !defined (CONFIG_USER_ONLY)
1787
/* 602 mfrom: table lookup mirroring the hardware ROM (see formula above). */
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        /* Out-of-range inputs read as zero */
        return 0;
    }
}
1796
#endif
1797

    
1798
/*****************************************************************************/
1799
/* Embedded PowerPC specific helpers */
1800

    
1801
/* XXX: to be improved to check access rights when in user-mode */
1802
/* Read device control register dcrn; raises a program exception when
 * no DCR environment exists or the read fails. */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    uint32_t val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}
1817

    
1818
/* Write device control register dcrn; raises a program exception when
 * no DCR environment exists or the write fails. */
void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
1830

    
1831
#if !defined(CONFIG_USER_ONLY)
1832
void helper_40x_rfci (void)
1833
{
1834
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1835
           ~((target_ulong)0xFFFF0000), 0);
1836
}
1837

    
1838
void helper_rfci (void)
1839
{
1840
    do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1841
           ~((target_ulong)0x3FFF0000), 0);
1842
}
1843

    
1844
void helper_rfdi (void)
1845
{
1846
    do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1847
           ~((target_ulong)0x3FFF0000), 0);
1848
}
1849

    
1850
void helper_rfmci (void)
1851
{
1852
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1853
           ~((target_ulong)0x3FFF0000), 0);
1854
}
1855
#endif
1856

    
1857
/* 440 specific */
/* dlmzb: determine the leftmost zero byte in the 8-byte string formed
 * by HIGH followed by LOW.  Returns i, the 1-based position of the
 * first zero byte (9 when there is none), and writes it into the low
 * 7 bits of XER.  With update_Rc, CR0 is set to 0x4 (zero byte in
 * high word), 0x8 (zero byte in low word) or 0x2 (no zero byte),
 * OR-ed with the XER summary-overflow bit. */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    /* Scan the four bytes of the high word, most significant first. */
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    /* Then the four bytes of the low word. */
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    /* Byte count goes into XER[57:63] (low 7 bits). */
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
1892

    
1893
/*****************************************************************************/
/* Altivec extension helpers */
/* Host-order index of the most/least significant half of a vector. */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/* Iterate over the elements of vector r in PowerPC (big-endian) element
 * order regardless of host byte order.  NOTE(review): expansion relies
 * on a pointer named 'r' being in scope at the call site. */
#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                \
    if (float32_is_any_nan(x)) {                                \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

/* Chained NaN handling for one, two or three float32 operands; the
 * statement that follows runs only when no operand is a NaN. */
#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
1927

    
1928
/* Saturating arithmetic helpers.  */
/* SATCVT: narrow a signed value to a smaller (possibly unsigned) type,
 * clamping to [min, max] and setting *sat when clamping occurred. */
#define SATCVT(from, to, from_type, to_type, min, max)                  \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x < (from_type)min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (x > (from_type)max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
/* SATCVTU: unsigned-source variant — only the upper bound can clamp. */
#define SATCVTU(from, to, from_type, to_type, min, max)                 \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x > (from_type)max) {                                       \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU
1968

    
1969
/* Load Vector Element Indexed (lvebx/lvehx/lvewx): load one element
 * into the register slot selected by the low 4 address bits,
 * byte-swapping the loaded value when MSR[LE] is set. */
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if(msr_le) {                                            \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
#define I(x) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE
1988

    
1989
/* lvsl: build the left-shift permute control vector — consecutive
 * byte values starting at (sh & 0xf). */
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int pattern = sh & 0xf;

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = pattern;
        pattern++;
    }
}
1997

    
1998
/* lvsr: build the right-shift permute control vector — consecutive
 * byte values starting at 16 - (sh & 0xf). */
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int pattern = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = pattern;
        pattern++;
    }
}
2006

    
2007
#define STVE(name, access, swap, element)                       \
2008
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
2009
    {                                                           \
2010
        size_t n_elems = ARRAY_SIZE(r->element);                \
2011
        int adjust = HI_IDX*(n_elems-1);                        \
2012
        int sh = sizeof(r->element[0]) >> 1;                    \
2013
        int index = (addr & 0xf) >> sh;                         \
2014
        if(msr_le) {                                            \
2015
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2016
        } else {                                                        \
2017
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2018
        }                                                               \
2019
    }
2020
#define I(x) (x)
2021
STVE(stvebx, stb, I, u8)
2022
STVE(stvehx, stw, bswap16, u16)
2023
STVE(stvewx, stl, bswap32, u32)
2024
#undef I
2025
#undef LVE
2026

    
2027
/* mtvscr: move the low 32-bit word of the source vector (element 3 in
 * big-endian element order) into VSCR. */
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    /* Propagate the vscr_nj bit into the softfloat status so the
     * vector unit flushes denormals to zero when it is set. */
    set_flush_to_zero(vscr_nj, &env->vec_status);
}
2036

    
2037
/* vaddcuw: per-word carry-out of unsigned addition a + b.  A carry
 * occurs exactly when b > ~a, i.e. when a + b wraps past 2^32. */
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint32_t not_a = ~a->u32[i];
        r->u32[i] = (not_a < b->u32[i]) ? 1 : 0;
    }
}
2044

    
2045
/* Element-wise modulo add/subtract for byte/halfword/word vectors
 * (vaddubm/vadduhm/vadduwm and the vsub* counterparts). */
#define VARITH_DO(name, op, element)        \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
{                                                                       \
    int i;                                                              \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
        r->element[i] = a->element[i] op b->element[i];                 \
    }                                                                   \
}
#define VARITH(suffix, element)                  \
  VARITH_DO(add##suffix, +, element)             \
  VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH
2061

    
2062
/* Element-wise float32 add/subtract (vaddfp/vsubfp); NaN operands are
 * quieted and propagated by HANDLE_NAN2 instead of being computed. */
#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP
2075

    
2076
/* One saturating element operation: compute in the wider type, then
 * narrow with the given cvt* helper which records saturation in 'sat'. */
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

/* Saturating vector add/subtract; sets VSCR[SAT] if any element
 * saturated.  NOTE(review): the three switch cases expand identically —
 * the sizeof dispatch is effectively a no-op kept for symmetry. */
#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)    \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)       \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)     \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
2114

    
2115
/* Element-wise average rounded up: (a + b + 1) >> 1 computed in the
 * wider type so the intermediate sum cannot overflow. */
#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
2133

    
2134
/* vcfux/vcfsx: convert each (un)signed word to float32, then divide by
 * 2**uim via scalbn with a negative exponent. */
#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn (t, -uim, &env->vec_status);       \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF
2146

    
2147
/* Integer vector compares: each element becomes all-ones when the
 * predicate holds, zero otherwise.  The "_dot" (record) forms also set
 * CR6 bit 3 when ALL elements matched and bit 1 when NONE did. */
#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
            switch (sizeof (a->element[0])) {                           \
            case 4: r->u32[i] = result; break;                          \
            case 2: r->u16[i] = result; break;                          \
            case 1: r->u8[i] = result; break;                           \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP
2182

    
2183
/* Float vector compares.  An unordered (NaN) comparison yields zero.
 * gefp is expressed as "relation != less", which covers both equal and
 * greater.  Record forms update CR6 like the integer compares. */
#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            uint32_t result;                                            \
            int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)           \
    VCMPFP_DO(suffix, compare, order, 0)         \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP
2216

    
2217
/* Shared body of vcmpbfp(.): per element, set result bit 31 when
 * a > b ("not less-or-equal") and bit 30 when a < -b ("not
 * greater-or-equal"); NaN operands yield 0xc0000000.  With record,
 * CR6 bit 1 is set when every element was within bounds. */
static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                                    int record)
{
    int i;
    int all_in = 0;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here.  */
        } else {
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;
            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}
2240

    
2241
/* vcmpbfp: bounds compare without CR6 update. */
void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 0);
}
2245

    
2246
/* vcmpbfp.: bounds compare that also records the result in CR6. */
void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 1);
}
2250

    
2251
/* vctuxs/vctsxs: convert float32 * 2**uim to a saturated (un)signed
 * word, rounding toward zero on a local copy of the FP status; NaN
 * inputs produce 0.  Sets VSCR[SAT] when any element saturated. */
#define VCT(suffix, satcvt, element)                                    \
    void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(float_round_to_zero, &s);               \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            if (float32_is_any_nan(b->f[i])) {                          \
                r->element[i] = 0;                                      \
            } else {                                                    \
                float64 t = float32_to_float64(b->f[i], &s);            \
                int64_t j;                                              \
                t = float64_scalbn(t, uim, &s);                         \
                j = float64_to_int64(t, &s);                            \
                r->element[i] = satcvt(j, &sat);                        \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
2276

    
2277
/* vmaddfp: per-element (a * c) + b on float32, computed in float64 so
 * the result is rounded only once; NaN operands are propagated by
 * HANDLE_NAN3 without entering the computation. */
void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end.  */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_add(t, bf, &env->vec_status);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
2294

    
2295
/* vmhaddshs: for each halfword, take the high 17 bits of a*b
 * (arithmetic shift right by 15), add c, and saturate to int16;
 * VSCR[SAT] is set when any element saturated. */
void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t product = a->s16[i] * b->s16[i];
        int32_t sum = (product >> 15) + (int32_t)c->s16[i];
        r->s16[i] = cvtswsh(sum, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2310

    
2311
/* vmhraddshs: like vmhaddshs but with rounding — 0x4000 is added to
 * the product before the shift; saturates to int16 and records
 * saturation in VSCR[SAT]. */
void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t product = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t sum = (product >> 15) + (int32_t)c->s16[i];
        r->s16[i] = cvtswsh(sum, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2326

    
2327
/* Element-wise integer min/max: min uses the '>' compare (keep b when
 * a > b), max uses '<'. */
#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
  VMINMAX_DO(min##suffix, >, element)           \
  VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX
2350

    
2351
/* Float min/max: pick rT when a < b (quiet compare), rF otherwise.
 * minfp instantiates (rT=a, rF=b); maxfp swaps them.  NaN operands are
 * quieted and propagated by HANDLE_NAN2. */
#define VMINMAXFP(suffix, rT, rF)                                       \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
                    r->f[i] = rT->f[i];                                 \
                } else {                                                \
                    r->f[i] = rF->f[i];                                 \
                }                                                       \
            }                                                           \
        }                                                               \
    }
VMINMAXFP(minfp, a, b)
VMINMAXFP(maxfp, b, a)
#undef VMINMAXFP
2368

    
2369
/* vmladduhm: per-halfword multiply-add kept modulo 2^16 — only the
 * low 16 bits of a*b + c are written back. */
void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t product = a->s16[i] * b->s16[i];
        r->s16[i] = (int16_t)(product + c->s16[i]);
    }
}
2377

    
2378
/* Vector merge high/low.  MRGHI/MRGLO encode which VMRG_DO branch a
 * given helper takes on this host so that the PowerPC element order
 * comes out right on both endiannesses; note mrgl* is instantiated
 * with MRGHI and mrgh* with MRGLO — the swap is deliberate and pairs
 * with the endian-dependent values below. */
#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        for (i = 0; i < n_elems/2; i++) {                               \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
  VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO
2412

    
2413
/* vmsummbm: multiply signed bytes of a by unsigned bytes of b, then
 * sum each group of four products into the matching word of c. */
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t partial[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        partial[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        int32_t acc = c->s32[i];
        acc += partial[4*i];
        acc += partial[4*i+1];
        acc += partial[4*i+2];
        acc += partial[4*i+3];
        r->s32[i] = acc;
    }
}
2426

    
2427
/* vmsumshm: signed halfword products, pairs summed modulo 2^32 into
 * the corresponding word of c. */
void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t partial[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        partial[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        int32_t acc = c->s32[i];
        acc += partial[2*i];
        acc += partial[2*i+1];
        r->s32[i] = acc;
    }
}
2440

    
2441
/* vmsumshs: like vmsumshm but the per-word accumulation is done in
 * 64 bits and saturated to int32; VSCR[SAT] records saturation. */
void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2460

    
2461
/* vmsumubm: unsigned byte products, groups of four summed modulo 2^32
 * into the corresponding word of c. */
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t partial[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        partial[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        uint32_t acc = c->u32[i];
        acc += partial[4*i];
        acc += partial[4*i+1];
        acc += partial[4*i+2];
        acc += partial[4*i+3];
        r->u32[i] = acc;
    }
}
2474

    
2475
/* vmsumuhm: unsigned halfword products, pairs summed modulo 2^32 into
 * the corresponding word of c. */
void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t partial[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        partial[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        uint32_t acc = c->u32[i];
        acc += partial[2*i];
        acc += partial[2*i+1];
        r->u32[i] = acc;
    }
}
2488

    
2489
/* vmsumuhs: like vmsumuhm but the per-word accumulation is done in
 * 64 bits and saturated to uint32; VSCR[SAT] records saturation. */
void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2508

    
2509
/* vmule*/vmulo*: multiply the even (HI_IDX) or odd (LO_IDX) elements
 * of each pair, producing double-width products. */
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element) \
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
2530

    
2531
void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    /* vnmsubfp: r = -((a * c) - b), elementwise on single-precision. */
    int k;
    for (k = 0; k < ARRAY_SIZE(r->f); k++) {
        HANDLE_NAN3(r->f[k], a->f[k], b->f[k], c->f[k]) {
            /* Widen to double so the multiply and subtract round only
             * once, at the final conversion back to single precision. */
            float64 wa = float32_to_float64(a->f[k], &env->vec_status);
            float64 wb = float32_to_float64(b->f[k], &env->vec_status);
            float64 wc = float32_to_float64(c->f[k], &env->vec_status);
            float64 acc = float64_mul(wa, wc, &env->vec_status);
            acc = float64_sub(acc, wb, &env->vec_status);
            r->f[k] = float64_to_float32(float64_chs(acc), &env->vec_status);
        }
    }
}
2549

    
2550
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    /* vperm: each result byte is picked from the 32-byte concatenation
     * of a and b by the low 5 bits of the matching control byte of c. */
    ppc_avr_t picked;
    int k;
    VECTOR_FOR_INORDER_I (k, u8) {
        int sel = c->u8[k] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int byte = sel & 0xf;
#else
        int byte = 15 - (sel & 0xf);
#endif
        picked.u8[k] = (sel & 0x10) ? b->u8[byte] : a->u8[byte];
    }
    *r = picked;
}
2569

    
2570
#if defined(HOST_WORDS_BIGENDIAN)
2571
#define PKBIG 1
2572
#else
2573
#define PKBIG 0
2574
#endif
2575
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2576
{
2577
    int i, j;
2578
    ppc_avr_t result;
2579
#if defined(HOST_WORDS_BIGENDIAN)
2580
    const ppc_avr_t *x[2] = { a, b };
2581
#else
2582
    const ppc_avr_t *x[2] = { b, a };
2583
#endif
2584

    
2585
    VECTOR_FOR_INORDER_I (i, u64) {
2586
        VECTOR_FOR_INORDER_I (j, u32){
2587
            uint32_t e = x[i]->u32[j];
2588
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
2589
                                 ((e >> 6) & 0x3e0) |
2590
                                 ((e >> 3) & 0x1f));
2591
        }
2592
    }
2593
    *r = result;
2594
}
2595

    
2596
#define VPK(suffix, from, to, cvt, dosat)       \
2597
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2598
    {                                                                   \
2599
        int i;                                                          \
2600
        int sat = 0;                                                    \
2601
        ppc_avr_t result;                                               \
2602
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
2603
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
2604
        VECTOR_FOR_INORDER_I (i, from) {                                \
2605
            result.to[i] = cvt(a0->from[i], &sat);                      \
2606
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
2607
        }                                                               \
2608
        *r = result;                                                    \
2609
        if (dosat && sat) {                                             \
2610
            env->vscr |= (1 << VSCR_SAT);                               \
2611
        }                                                               \
2612
    }
2613
#define I(x, y) (x)
2614
VPK(shss, s16, s8, cvtshsb, 1)
2615
VPK(shus, s16, u8, cvtshub, 1)
2616
VPK(swss, s32, s16, cvtswsh, 1)
2617
VPK(swus, s32, u16, cvtswuh, 1)
2618
VPK(uhus, u16, u8, cvtuhub, 1)
2619
VPK(uwus, u32, u16, cvtuwuh, 1)
2620
VPK(uhum, u16, u8, I, 0)
2621
VPK(uwum, u32, u16, I, 0)
2622
#undef I
2623
#undef VPK
2624
#undef PKBIG
2625

    
2626
void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
{
    /* vrefp: per-element reciprocal estimate (computed exactly here). */
    int k;
    for (k = 0; k < ARRAY_SIZE(r->f); k++) {
        HANDLE_NAN1(r->f[k], b->f[k]) {
            r->f[k] = float32_div(float32_one, b->f[k], &env->vec_status);
        }
    }
}
2635

    
2636
#define VRFI(suffix, rounding)                                          \
2637
    void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
2638
    {                                                                   \
2639
        int i;                                                          \
2640
        float_status s = env->vec_status;                               \
2641
        set_float_rounding_mode(rounding, &s);                          \
2642
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
2643
            HANDLE_NAN1(r->f[i], b->f[i]) {                             \
2644
                r->f[i] = float32_round_to_int (b->f[i], &s);           \
2645
            }                                                           \
2646
        }                                                               \
2647
    }
2648
VRFI(n, float_round_nearest_even)
2649
VRFI(m, float_round_down)
2650
VRFI(p, float_round_up)
2651
VRFI(z, float_round_to_zero)
2652
#undef VRFI
2653

    
2654
#define VROTATE(suffix, element)                                        \
2655
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2656
    {                                                                   \
2657
        int i;                                                          \
2658
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2659
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2660
            unsigned int shift = b->element[i] & mask;                  \
2661
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2662
        }                                                               \
2663
    }
2664
VROTATE(b, u8)
2665
VROTATE(h, u16)
2666
VROTATE(w, u32)
2667
#undef VROTATE
2668

    
2669
void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
{
    /* vrsqrtefp: per-element reciprocal square-root estimate. */
    int k;
    for (k = 0; k < ARRAY_SIZE(r->f); k++) {
        HANDLE_NAN1(r->f[k], b->f[k]) {
            float32 root = float32_sqrt(b->f[k], &env->vec_status);
            r->f[k] = float32_div(float32_one, root, &env->vec_status);
        }
    }
}
2679

    
2680
void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    /* vsel: bitwise select -- take bits of b where c is 1, of a where 0. */
    int k;
    for (k = 0; k < 2; k++) {
        r->u64[k] = (a->u64[k] & ~c->u64[k]) | (b->u64[k] & c->u64[k]);
    }
}
2685

    
2686
void helper_vexptefp (ppc_avr_t *r, ppc_avr_t *b)
{
    /* vexptefp: per-element 2**x estimate. */
    int k;
    for (k = 0; k < ARRAY_SIZE(r->f); k++) {
        HANDLE_NAN1(r->f[k], b->f[k]) {
            r->f[k] = float32_exp2(b->f[k], &env->vec_status);
        }
    }
}

void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
{
    /* vlogefp: per-element log2(x) estimate. */
    int k;
    for (k = 0; k < ARRAY_SIZE(r->f); k++) {
        HANDLE_NAN1(r->f[k], b->f[k]) {
            r->f[k] = float32_log2(b->f[k], &env->vec_status);
        }
    }
}
2705

    
2706
#if defined(HOST_WORDS_BIGENDIAN)
2707
#define LEFT 0
2708
#define RIGHT 1
2709
#else
2710
#define LEFT 1
2711
#define RIGHT 0
2712
#endif
2713
/* The specification says that the results are undefined if all of the
2714
 * shift counts are not identical.  We check to make sure that they are
2715
 * to conform to what real hardware appears to do.  */
2716
#define VSHIFT(suffix, leftp)                                           \
2717
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
2718
    {                                                                   \
2719
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
2720
        int doit = 1;                                                   \
2721
        int i;                                                          \
2722
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
2723
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
2724
        }                                                               \
2725
        if (doit) {                                                     \
2726
            if (shift == 0) {                                           \
2727
                *r = *a;                                                \
2728
            } else if (leftp) {                                         \
2729
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
2730
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
2731
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
2732
            } else {                                                    \
2733
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
2734
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
2735
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
2736
            }                                                           \
2737
        }                                                               \
2738
    }
2739
VSHIFT(l, LEFT)
2740
VSHIFT(r, RIGHT)
2741
#undef VSHIFT
2742
#undef LEFT
2743
#undef RIGHT
2744

    
2745
#define VSL(suffix, element)                                            \
2746
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2747
    {                                                                   \
2748
        int i;                                                          \
2749
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2750
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2751
            unsigned int shift = b->element[i] & mask;                  \
2752
            r->element[i] = a->element[i] << shift;                     \
2753
        }                                                               \
2754
    }
2755
VSL(b, u8)
2756
VSL(h, u16)
2757
VSL(w, u32)
2758
#undef VSL
2759

    
2760
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    /* vsldoi: shift the 32-byte concatenation a:b left by 'shift'
     * bytes and keep the high 16 bytes. */
    int sh = shift & 0xf;
    ppc_avr_t out;
    int k;

#if defined(HOST_WORDS_BIGENDIAN)
    for (k = 0; k < ARRAY_SIZE(r->u8); k++) {
        int pos = sh + k;
        out.u8[k] = (pos > 0xf) ? b->u8[pos - 0x10] : a->u8[pos];
    }
#else
    for (k = 0; k < ARRAY_SIZE(r->u8); k++) {
        int pos = (16 - sh) + k;
        out.u8[k] = (pos > 0xf) ? a->u8[pos - 0x10] : b->u8[pos];
    }
#endif
    *r = out;
}
2787

    
2788
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    /* vslo: shift the whole vector left by 0-15 octets; the count comes
     * from bits 121:124 of b. */
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
    memmove(&r->u8[0], &a->u8[sh], 16 - sh);
    memset(&r->u8[16 - sh], 0, sh);
#else
    memmove(&r->u8[sh], &a->u8[0], 16 - sh);
    memset(&r->u8[0], 0, sh);
#endif
}
2800

    
2801
/* Experimental testing shows that hardware masks the immediate.  */
2802
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2803
#if defined(HOST_WORDS_BIGENDIAN)
2804
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2805
#else
2806
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2807
#endif
2808
#define VSPLT(suffix, element)                                          \
2809
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2810
    {                                                                   \
2811
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
2812
        int i;                                                          \
2813
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2814
            r->element[i] = s;                                          \
2815
        }                                                               \
2816
    }
2817
VSPLT(b, u8)
2818
VSPLT(h, u16)
2819
VSPLT(w, u32)
2820
#undef VSPLT
2821
#undef SPLAT_ELEMENT
2822
#undef _SPLAT_MASKED
2823

    
2824
#define VSPLTI(suffix, element, splat_type)                     \
2825
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
2826
    {                                                           \
2827
        splat_type x = (int8_t)(splat << 3) >> 3;               \
2828
        int i;                                                  \
2829
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
2830
            r->element[i] = x;                                  \
2831
        }                                                       \
2832
    }
2833
VSPLTI(b, s8, int8_t)
2834
VSPLTI(h, s16, int16_t)
2835
VSPLTI(w, s32, int32_t)
2836
#undef VSPLTI
2837

    
2838
#define VSR(suffix, element)                                            \
2839
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2840
    {                                                                   \
2841
        int i;                                                          \
2842
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2843
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2844
            unsigned int shift = b->element[i] & mask;                  \
2845
            r->element[i] = a->element[i] >> shift;                     \
2846
        }                                                               \
2847
    }
2848
VSR(ab, s8)
2849
VSR(ah, s16)
2850
VSR(aw, s32)
2851
VSR(b, u8)
2852
VSR(h, u16)
2853
VSR(w, u32)
2854
#undef VSR
2855

    
2856
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    /* vsro: shift the whole vector right by 0-15 octets; the count
     * comes from bits 121:124 of b. */
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
    memmove(&r->u8[sh], &a->u8[0], 16 - sh);
    memset(&r->u8[0], 0, sh);
#else
    memmove(&r->u8[0], &a->u8[sh], 16 - sh);
    memset(&r->u8[16 - sh], 0, sh);
#endif
}
2868

    
2869
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    /* vsubcuw: per-word carry-out of a - b (1 when no borrow occurs). */
    int k;
    for (k = 0; k < ARRAY_SIZE(r->u32); k++) {
        r->u32[k] = (a->u32[k] >= b->u32[k]) ? 1 : 0;
    }
}
2876

    
2877
void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    /* vsumsws: saturating sum of all words of a plus the last word of b;
     * the result goes into the last word, the others are cleared. */
    int64_t acc;
    int k, last;
    ppc_avr_t out;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    last = ARRAY_SIZE(r->s32) - 1;
#else
    last = 0;
#endif
    acc = (int64_t)b->s32[last];
    for (k = 0; k < ARRAY_SIZE(r->s32); k++) {
        acc += a->s32[k];
        out.s32[k] = 0;
    }
    out.s32[last] = cvtsdsw(acc, &sat);
    *r = out;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2901

    
2902
void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    /* vsum2sws: per doubleword, saturating sum of two words of a plus
     * the corresponding word of b; the other word is cleared. */
    int k, j, pos;
    ppc_avr_t out;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    pos = 1;
#else
    pos = 0;
#endif
    for (k = 0; k < ARRAY_SIZE(r->u64); k++) {
        int64_t acc = (int64_t)b->s32[pos + k*2];
        out.u64[k] = 0;
        /* two words per doubleword (ARRAY_SIZE(r->u64) == 2) */
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            acc += a->s32[2*k + j];
        }
        out.s32[pos + k*2] = cvtsdsw(acc, &sat);
    }

    *r = out;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2927

    
2928
void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    /* vsum4sbs: per word, saturating sum of four signed bytes of a
     * plus the corresponding signed word of b. */
    int k, j;
    int sat = 0;

    for (k = 0; k < ARRAY_SIZE(r->s32); k++) {
        int64_t acc = (int64_t)b->s32[k];
        /* four bytes per word (ARRAY_SIZE(r->s32) == 4) */
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            acc += a->s8[4*k + j];
        }
        r->s32[k] = cvtsdsw(acc, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    /* vsum4shs: per word, saturating sum of two signed halfwords of a
     * plus the corresponding signed word of b. */
    int sat = 0;
    int k;

    for (k = 0; k < ARRAY_SIZE(r->s32); k++) {
        int64_t acc = (int64_t)b->s32[k] + a->s16[2*k] + a->s16[2*k+1];
        r->s32[k] = cvtsdsw(acc, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2961

    
2962
void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    /* vsum4ubs: per word, saturating sum of four unsigned bytes of a
     * plus the corresponding unsigned word of b. */
    int k, j;
    int sat = 0;

    for (k = 0; k < ARRAY_SIZE(r->u32); k++) {
        uint64_t acc = (uint64_t)b->u32[k];
        /* four bytes per word (ARRAY_SIZE(r->u32) == 4) */
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            acc += a->u8[4*k + j];
        }
        r->u32[k] = cvtuduw(acc, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2979

    
2980
#if defined(HOST_WORDS_BIGENDIAN)
2981
#define UPKHI 1
2982
#define UPKLO 0
2983
#else
2984
#define UPKHI 0
2985
#define UPKLO 1
2986
#endif
2987
#define VUPKPX(suffix, hi)                                      \
2988
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
2989
    {                                                           \
2990
        int i;                                                  \
2991
        ppc_avr_t result;                                       \
2992
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
2993
            uint16_t e = b->u16[hi ? i : i+4];                  \
2994
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
2995
            uint8_t r = (e >> 10) & 0x1f;                       \
2996
            uint8_t g = (e >> 5) & 0x1f;                        \
2997
            uint8_t b = e & 0x1f;                               \
2998
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
2999
        }                                                               \
3000
        *r = result;                                                    \
3001
    }
3002
VUPKPX(lpx, UPKLO)
3003
VUPKPX(hpx, UPKHI)
3004
#undef VUPKPX
3005

    
3006
#define VUPK(suffix, unpacked, packee, hi)                              \
3007
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
3008
    {                                                                   \
3009
        int i;                                                          \
3010
        ppc_avr_t result;                                               \
3011
        if (hi) {                                                       \
3012
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
3013
                result.unpacked[i] = b->packee[i];                      \
3014
            }                                                           \
3015
        } else {                                                        \
3016
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
3017
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
3018
            }                                                           \
3019
        }                                                               \
3020
        *r = result;                                                    \
3021
    }
3022
VUPK(hsb, s16, s8, UPKHI)
3023
VUPK(hsh, s32, s16, UPKHI)
3024
VUPK(lsb, s16, s8, UPKLO)
3025
VUPK(lsh, s32, s16, UPKLO)
3026
#undef VUPK
3027
#undef UPKHI
3028
#undef UPKLO
3029

    
3030
#undef DO_HANDLE_NAN
3031
#undef HANDLE_NAN1
3032
#undef HANDLE_NAN2
3033
#undef HANDLE_NAN3
3034
#undef VECTOR_FOR_INORDER_I
3035
#undef HI_IDX
3036
#undef LO_IDX
3037

    
3038
/*****************************************************************************/
3039
/* SPE extension helpers */
3040
/* Use a table to make this quicker */
3041
/* Bit-reversal lookup table for one 4-bit nibble.  Declared const:
 * it is only ever read (by byte_reverse below). */
static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

/* Reverse the bit order of an 8-bit value. */
static inline uint8_t byte_reverse(uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

/* Reverse the bit order of a 32-bit value.  Each byte is widened to
 * uint32_t before shifting: left-shifting a byte with bit 7 set by 24
 * in (signed) int arithmetic is undefined behavior (C99 6.5.7). */
static inline uint32_t word_reverse(uint32_t val)
{
    return (uint32_t)byte_reverse(val >> 24) |
        ((uint32_t)byte_reverse(val >> 16) << 8) |
        ((uint32_t)byte_reverse(val >> 8) << 16) |
        ((uint32_t)byte_reverse(val) << 24);
}
3056

    
3057
#define MASKBITS 16 // Random value - to be fixed (implementation dependant)
3058
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
3059
{
3060
    uint32_t a, b, d, mask;
3061

    
3062
    mask = UINT32_MAX >> (32 - MASKBITS);
3063
    a = arg1 & mask;
3064
    b = arg2 & mask;
3065
    d = word_reverse(1 + word_reverse(a | ~b));
3066
    return (arg1 & ~mask) | (d & b);
3067
}
3068

    
3069
uint32_t helper_cntlsw32 (uint32_t val)
{
    /* Count leading sign bits: leading ones when negative, otherwise
     * leading zeros. */
    return (val & 0x80000000) ? clz32(~val) : clz32(val);
}

uint32_t helper_cntlzw32 (uint32_t val)
{
    /* Count leading zero bits of a 32-bit value. */
    return clz32(val);
}
3081

    
3082
/* Single-precision floating-point conversions */
3083
static inline uint32_t efscfsi(uint32_t val)
3084
{
3085
    CPU_FloatU u;
3086

    
3087
    u.f = int32_to_float32(val, &env->vec_status);
3088

    
3089
    return u.l;
3090
}
3091

    
3092
static inline uint32_t efscfui(uint32_t val)
3093
{
3094
    CPU_FloatU u;
3095

    
3096
    u.f = uint32_to_float32(val, &env->vec_status);
3097

    
3098
    return u.l;
3099
}
3100

    
3101
static inline int32_t efsctsi(uint32_t val)
3102
{
3103
    CPU_FloatU u;
3104

    
3105
    u.l = val;
3106
    /* NaN are not treated the same way IEEE 754 does */
3107
    if (unlikely(float32_is_quiet_nan(u.f)))
3108
        return 0;
3109

    
3110
    return float32_to_int32(u.f, &env->vec_status);
3111
}
3112

    
3113
static inline uint32_t efsctui(uint32_t val)
3114
{
3115
    CPU_FloatU u;
3116

    
3117
    u.l = val;
3118
    /* NaN are not treated the same way IEEE 754 does */
3119
    if (unlikely(float32_is_quiet_nan(u.f)))
3120
        return 0;
3121

    
3122
    return float32_to_uint32(u.f, &env->vec_status);
3123
}
3124

    
3125
static inline uint32_t efsctsiz(uint32_t val)
3126
{
3127
    CPU_FloatU u;
3128

    
3129
    u.l = val;
3130
    /* NaN are not treated the same way IEEE 754 does */
3131
    if (unlikely(float32_is_quiet_nan(u.f)))
3132
        return 0;
3133

    
3134
    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
3135
}
3136

    
3137
static inline uint32_t efsctuiz(uint32_t val)
3138
{
3139
    CPU_FloatU u;
3140

    
3141
    u.l = val;
3142
    /* NaN are not treated the same way IEEE 754 does */
3143
    if (unlikely(float32_is_quiet_nan(u.f)))
3144
        return 0;
3145

    
3146
    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
3147
}
3148

    
3149
static inline uint32_t efscfsf(uint32_t val)
3150
{
3151
    CPU_FloatU u;
3152
    float32 tmp;
3153

    
3154
    u.f = int32_to_float32(val, &env->vec_status);
3155
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
3156
    u.f = float32_div(u.f, tmp, &env->vec_status);
3157

    
3158
    return u.l;
3159
}
3160

    
3161
static inline uint32_t efscfuf(uint32_t val)
3162
{
3163
    CPU_FloatU u;
3164
    float32 tmp;
3165

    
3166
    u.f = uint32_to_float32(val, &env->vec_status);
3167
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3168
    u.f = float32_div(u.f, tmp, &env->vec_status);
3169

    
3170
    return u.l;
3171
}
3172

    
3173
static inline uint32_t efsctsf(uint32_t val)
3174
{
3175
    CPU_FloatU u;
3176
    float32 tmp;
3177

    
3178
    u.l = val;
3179
    /* NaN are not treated the same way IEEE 754 does */
3180
    if (unlikely(float32_is_quiet_nan(u.f)))
3181
        return 0;
3182
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3183
    u.f = float32_mul(u.f, tmp, &env->vec_status);
3184

    
3185
    return float32_to_int32(u.f, &env->vec_status);
3186
}
3187

    
3188
static inline uint32_t efsctuf(uint32_t val)
3189
{
3190
    CPU_FloatU u;
3191
    float32 tmp;
3192

    
3193
    u.l = val;
3194
    /* NaN are not treated the same way IEEE 754 does */
3195
    if (unlikely(float32_is_quiet_nan(u.f)))
3196
        return 0;
3197
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3198
    u.f = float32_mul(u.f, tmp, &env->vec_status);
3199

    
3200
    return float32_to_uint32(u.f, &env->vec_status);
3201
}
3202

    
3203
/* Expose the efs* single-precision conversions as TCG helpers. */
#define HELPER_SPE_SINGLE_CONV(name)                                          \
uint32_t helper_e##name (uint32_t val)                                        \
{                                                                             \
    return e##name(val);                                                      \
}
HELPER_SPE_SINGLE_CONV(fscfsi);    /* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfui);    /* efscfui */
HELPER_SPE_SINGLE_CONV(fscfuf);    /* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfsf);    /* efscfsf */
HELPER_SPE_SINGLE_CONV(fsctsi);    /* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctui);    /* efsctui */
HELPER_SPE_SINGLE_CONV(fsctsiz);   /* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);   /* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctsf);    /* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctuf);    /* efsctuf */
3228

    
3229
/* Build a 64-bit vector result from two independent 32-bit lane
 * conversions.  The low lane is masked to 32 bits before merging:
 * efsctsi returns a signed int32_t, and a bare cast to uint64_t would
 * sign-extend a negative low-lane result into the high 32 bits,
 * corrupting the high lane.  (The high lane needs no mask -- any
 * sign-extension bits are shifted out by the << 32.) */
#define HELPER_SPE_VECTOR_CONV(name)                                          \
uint64_t helper_ev##name (uint64_t val)                                       \
{                                                                             \
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
            (uint64_t)(uint32_t)e##name(val);                                 \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
3255

    
3256
/* Single-precision floating-point arithmetic */
3257
static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
3258
{
3259
    CPU_FloatU u1, u2;
3260
    u1.l = op1;
3261
    u2.l = op2;
3262
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3263
    return u1.l;
3264
}
3265

    
3266
static inline uint32_t efssub(uint32_t op1, uint32_t op2)
3267
{
3268
    CPU_FloatU u1, u2;
3269
    u1.l = op1;
3270
    u2.l = op2;
3271
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3272
    return u1.l;
3273
}
3274

    
3275
static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
3276
{
3277
    CPU_FloatU u1, u2;
3278
    u1.l = op1;
3279
    u2.l = op2;
3280
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3281
    return u1.l;
3282
}
3283

    
3284
static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
3285
{
3286
    CPU_FloatU u1, u2;
3287
    u1.l = op1;
3288
    u2.l = op2;
3289
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3290
    return u1.l;
3291
}
3292

    
3293
/* Expose the efs* arithmetic primitives as TCG helpers. */
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
HELPER_SPE_SINGLE_ARITH(fsadd);   /* efsadd */
HELPER_SPE_SINGLE_ARITH(fssub);   /* efssub */
HELPER_SPE_SINGLE_ARITH(fsmul);   /* efsmul */
HELPER_SPE_SINGLE_ARITH(fsdiv);   /* efsdiv */
3306

    
3307
/* Apply an efs* operation independently to both 32-bit lanes of the
 * 64-bit SPE vector (the e-functions return uint32_t, so the casts
 * zero-extend). */
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    uint64_t high = (uint64_t)e##name(op1 >> 32, op2 >> 32) << 32;            \
    uint64_t low = (uint64_t)e##name(op1, op2);                               \
    return high | low;                                                        \
}
HELPER_SPE_VECTOR_ARITH(fsadd);   /* evfsadd */
HELPER_SPE_VECTOR_ARITH(fssub);   /* evfssub */
HELPER_SPE_VECTOR_ARITH(fsmul);   /* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsdiv);   /* evfsdiv */
3321

    
3322
/* Single-precision floating-point comparisons */
3323
static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
3324
{
3325
    CPU_FloatU u1, u2;
3326
    u1.l = op1;
3327
    u2.l = op2;
3328
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3329
}
3330

    
3331
static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
3332
{
3333
    CPU_FloatU u1, u2;
3334
    u1.l = op1;
3335
    u2.l = op2;
3336
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
3337
}
3338

    
3339
static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
3340
{
3341
    CPU_FloatU u1, u2;
3342
    u1.l = op1;
3343
    u2.l = op2;
3344
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3345
}
3346

    
3347
static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
3348
{
3349
    /* XXX: TODO: test special values (NaN, infinites, ...) */
3350
    return efststlt(op1, op2);
3351
}
3352

    
3353
static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
3354
{
3355
    /* XXX: TODO: test special values (NaN, infinites, ...) */
3356
    return efststgt(op1, op2);
3357
}
3358

    
3359
static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
3360
{
3361
    /* XXX: TODO: test special values (NaN, infinites, ...) */
3362
    return efststeq(op1, op2);
3363
}
3364

    
3365
/* Expose the single-precision comparison primitives as helpers.
 * NOTE(review): the e-functions already return 0 or 4, so the extra
 * "<< 2" yields 0 or 16 -- confirm this encoding against the
 * translate.c consumers of these helpers. */
#define HELPER_SINGLE_SPE_CMP(name)                                           \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2) << 2;                                            \
}
HELPER_SINGLE_SPE_CMP(fststlt);   /* efststlt */
HELPER_SINGLE_SPE_CMP(fststgt);   /* efststgt */
HELPER_SINGLE_SPE_CMP(fststeq);   /* efststeq */
HELPER_SINGLE_SPE_CMP(fscmplt);   /* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmpgt);   /* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpeq);   /* efscmpeq */
3382

    
3383
/* Merge two per-lane comparison results: bit 3 from the first lane,
 * bit 2 from the second, bit 1 if either is set, bit 0 if both are. */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    uint32_t either = t0 | t1;
    uint32_t both = t0 & t1;
    return (t0 << 3) | (t1 << 2) | (either << 1) | both;
}
3387

    
3388
/* Generate a vector SPE compare helper: apply the scalar primitive to
 * both 32-bit halves of the 64-bit operands (high half first) and merge
 * the two results with evcmp_merge(). */
#define HELPER_VECTOR_SPE_CMP(name)                                           \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
3405

    
3406
/* Double-precision floating-point conversion */
3407
/* efdcfsi: convert a signed 32-bit integer to double precision. */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU result;

    result.d = int32_to_float64(val, &env->vec_status);
    return result.ll;
}
3415

    
3416
/* efdcfsid: convert a signed 64-bit integer to double precision. */
uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU result;

    result.d = int64_to_float64(val, &env->vec_status);
    return result.ll;
}
3424

    
3425
/* efdcfui: convert an unsigned 32-bit integer to double precision. */
uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU result;

    result.d = uint32_to_float64(val, &env->vec_status);
    return result.ll;
}
3433

    
3434
/* efdcfuid: convert an unsigned 64-bit integer to double precision. */
uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU result;

    result.d = uint64_to_float64(val, &env->vec_status);
    return result.ll;
}
3442

    
3443
uint32_t helper_efdctsi (uint64_t val)
3444
{
3445
    CPU_DoubleU u;
3446

    
3447
    u.ll = val;
3448
    /* NaN are not treated the same way IEEE 754 does */
3449
    if (unlikely(float64_is_quiet_nan(u.d)))
3450
        return 0;
3451

    
3452
    return float64_to_int32(u.d, &env->vec_status);
3453
}
3454

    
3455
/* efdctui: convert double precision to an unsigned 32-bit integer. */
uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does: any NaN (quiet or
     * signaling) converts to 0, so check with float64_is_any_nan(). */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}
3466

    
3467
/* efdctsiz: convert double precision to a signed 32-bit integer,
 * rounding toward zero. */
uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does: any NaN (quiet or
     * signaling) converts to 0, so check with float64_is_any_nan(). */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}
3478

    
3479
/* efdctsidz: convert double precision to a signed 64-bit integer,
 * rounding toward zero. */
uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does: any NaN (quiet or
     * signaling) converts to 0, so check with float64_is_any_nan(). */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}
3490

    
3491
/* efdctuiz: convert double precision to an unsigned 32-bit integer,
 * rounding toward zero. */
uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does: any NaN (quiet or
     * signaling) converts to 0, so check with float64_is_any_nan(). */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}
3502

    
3503
/* efdctuidz: convert double precision to an unsigned 64-bit integer,
 * rounding toward zero. */
uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does: any NaN (quiet or
     * signaling) converts to 0, so check with float64_is_any_nan(). */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}
3514

    
3515
/* efdcfsf: convert a signed 32-bit fractional value to double precision
 * (integer conversion followed by division by 2^32). */
uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU u;
    float64 scale;

    u.d = int32_to_float64(val, &env->vec_status);
    scale = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, scale, &env->vec_status);

    return u.ll;
}
3526

    
3527
/* efdcfuf: convert an unsigned 32-bit fractional value to double
 * precision (integer conversion followed by division by 2^32). */
uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU u;
    float64 scale;

    u.d = uint32_to_float64(val, &env->vec_status);
    scale = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, scale, &env->vec_status);

    return u.ll;
}
3538

    
3539
/* efdctsf: convert double precision to a signed 32-bit fractional value
 * (multiply by 2^32, then integer conversion). */
uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does: any NaN (quiet or
     * signaling) converts to 0.  float64_is_quiet_nan() would let sNaNs
     * fall through and saturate instead. */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}
3553

    
3554
/* efdctuf: convert double precision to an unsigned 32-bit fractional
 * value (multiply by 2^32, then integer conversion). */
uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does: any NaN (quiet or
     * signaling) converts to 0, so check with float64_is_any_nan(). */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}
3568

    
3569
/* efscfd: narrow a double-precision value to single precision. */
uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU in;
    CPU_FloatU out;

    in.ll = val;
    out.f = float64_to_float32(in.d, &env->vec_status);

    return out.l;
}
3579

    
3580
/* efdcfs: widen a single-precision value to double precision. */
uint64_t helper_efdcfs (uint32_t val)
{
    CPU_FloatU in;
    CPU_DoubleU out;

    in.l = val;
    out.d = float32_to_float64(in.f, &env->vec_status);

    return out.ll;
}
3590

    
3591
/* Double precision fixed-point arithmetic */
3592
/* efdadd: double-precision addition. */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_add(a.d, b.d, &env->vec_status);

    return a.ll;
}
3600

    
3601
/* efdsub: double-precision subtraction (op1 - op2). */
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_sub(a.d, b.d, &env->vec_status);

    return a.ll;
}
3609

    
3610
/* efdmul: double-precision multiplication. */
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_mul(a.d, b.d, &env->vec_status);

    return a.ll;
}
3618

    
3619
/* efddiv: double-precision division (op1 / op2). */
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_div(a.d, b.d, &env->vec_status);

    return a.ll;
}
3627

    
3628
/* Double precision floating point helpers */
3629
/* efdtstlt: double-precision "test less than": 4 if op1 < op2, else 0. */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    if (float64_lt(a.d, b.d, &env->vec_status)) {
        return 4;
    }
    return 0;
}
3636

    
3637
/* efdtstgt: double-precision "test greater than": 4 if op1 > op2, else 0.
 * Derived as !(op1 <= op2). */
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    if (float64_le(a.d, b.d, &env->vec_status)) {
        return 0;
    }
    return 4;
}
3644

    
3645
/* efdtsteq: double-precision "test equal": 4 if op1 == op2, else 0. */
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    if (float64_eq(a.d, b.d, &env->vec_status)) {
        return 4;
    }
    return 0;
}
3652

    
3653
/* efdcmplt: double-precision compare less than.  Currently identical to
 * the non-trapping test variant (see XXX below). */
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(op1, op2);
}
3658

    
3659
/* efdcmpgt: double-precision compare greater than.  Currently identical
 * to the non-trapping test variant (see XXX below). */
uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(op1, op2);
}
3664

    
3665
/* efdcmpeq: double-precision compare equal.  Currently identical to the
 * non-trapping test variant (see XXX below). */
uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(op1, op2);
}
3670

    
3671
/*****************************************************************************/
3672
/* Softmmu support */
3673
#if !defined (CONFIG_USER_ONLY)
3674

    
3675
#define MMUSUFFIX _mmu
3676

    
3677
#define SHIFT 0
3678
#include "softmmu_template.h"
3679

    
3680
#define SHIFT 1
3681
#include "softmmu_template.h"
3682

    
3683
#define SHIFT 2
3684
#include "softmmu_template.h"
3685

    
3686
#define SHIFT 3
3687
#include "softmmu_template.h"
3688

    
3689
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    /* cpu_ppc_handle_mmu_fault() either installs the translation or sets
       env->exception_index/error_code and returns non-zero */
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Raises the exception prepared above; longjmps back to the CPU
           loop and does not return, so env is not restored on this path */
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
3720

    
3721
/* Segment registers load and store */
3722
/* Read segment register sr_num.  On 64-bit MMU models the value is
 * synthesized by ppc_load_sr(); otherwise it comes straight from the
 * architected SR array. */
target_ulong helper_load_sr (target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64)
        return ppc_load_sr(env, sr_num);
#endif
    return env->sr[sr_num];
}
3730

    
3731
/* Write segment register sr_num; delegated to the MMU code, which also
 * takes care of any required TLB invalidation. */
void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}
3735

    
3736
/* SLB management */
3737
#if defined(TARGET_PPC64)
3738
/* slbmfee/slbmfev style read of SLB entry slb_nr. */
target_ulong helper_load_slb (target_ulong slb_nr)
{
    return ppc_load_slb(env, slb_nr);
}
3742

    
3743
/* slbmte: write an SLB entry from the rb/rs register pair. */
void helper_store_slb (target_ulong rb, target_ulong rs)
{
    ppc_store_slb(env, rb, rs);
}
3747

    
3748
/* slbia: invalidate all SLB entries. */
void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}
3752

    
3753
/* slbie: invalidate the SLB entry matching the given effective address. */
void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
3757

    
3758
#endif /* defined(TARGET_PPC64) */
3759

    
3760
/* TLB management */
3761
/* tlbia: invalidate the whole TLB. */
void helper_tlbia (void)
{
    ppc_tlb_invalidate_all(env);
}
3765

    
3766
/* tlbie: invalidate the TLB entry for the given effective address. */
void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
3770

    
3771
/* Software driven TLBs management */
3772
/* PowerPC 602/603 software TLB load instructions helpers */
3773
/* Load one entry into the 602/603 software TLB from the SPR state that
 * the TLB-miss exception handler set up: RPA holds the PTE1 word, and
 * ICMP/IMISS or DCMP/DMISS hold the PTE0 compare word and miss address
 * depending on whether this is an instruction or data miss. */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    /* SRR1 bit 17 selects which TLB way to replace */
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3795

    
3796
/* tlbld: load a data TLB entry on the 602/603. */
void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}
3800

    
3801
/* tlbli: load an instruction TLB entry on the 602/603. */
void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}
3805

    
3806
/* PowerPC 74xx software TLB load instructions helpers */
3807
/* Load one entry into the 74xx software TLB from PTEHI/PTELO/TLBMISS.
 * The 74xx reuses the 6xx TLB storage helper. */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    /* TLBMISS: low two bits select the way, the rest is the miss address */
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3824

    
3825
/* tlbld: load a data TLB entry on the 74xx. */
void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}
3829

    
3830
/* tlbli: load an instruction TLB entry on the 74xx. */
void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}
3834

    
3835
static inline target_ulong booke_tlb_to_page_size(int size)
3836
{
3837
    return 1024 << (2 * size);
3838
}
3839

    
3840
/* Inverse of booke_tlb_to_page_size(): map a page size in bytes back to
 * the BookE TLB size field, or -1 if the size is not a valid BookE page
 * size (1KB * 4^n). */
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    switch (page_size) {
    case 0x00000400UL:
        return 0x0;
    case 0x00001000UL:
        return 0x1;
    case 0x00004000UL:
        return 0x2;
    case 0x00010000UL:
        return 0x3;
    case 0x00040000UL:
        return 0x4;
    case 0x00100000UL:
        return 0x5;
    case 0x00400000UL:
        return 0x6;
    case 0x01000000UL:
        return 0x7;
    case 0x04000000UL:
        return 0x8;
    case 0x10000000UL:
        return 0x9;
    case 0x40000000UL:
        return 0xA;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        return 0xB;
    case 0x000400000000ULL:
        return 0xC;
    case 0x001000000000ULL:
        return 0xD;
    case 0x004000000000ULL:
        return 0xE;
    case 0x010000000000ULL:
        return 0xF;
#endif
    default:
        return -1;
    }
}
3902

    
3903
/* Helpers for 4xx TLB management */
3904
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */
3905

    
3906
#define PPC4XX_TLBHI_V              0x00000040
3907
#define PPC4XX_TLBHI_E              0x00000020
3908
#define PPC4XX_TLBHI_SIZE_MIN       0
3909
#define PPC4XX_TLBHI_SIZE_MAX       7
3910
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
3911
#define PPC4XX_TLBHI_SIZE_SHIFT     7
3912
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007
3913

    
3914
#define PPC4XX_TLBLO_EX             0x00000200
3915
#define PPC4XX_TLBLO_WR             0x00000100
3916
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
3917
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00
3918

    
3919
/* tlbre (TLBHI word): reassemble the high word of a 4xx TLB entry from
 * the soft-TLB state.  Also deposits the entry's PID into SPR 40x_PID,
 * as the hardware does on a TLB read. */
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong val;
    int sz;

    tlb = &env->tlb[entry & PPC4XX_TLB_ENTRY_MASK].tlbe;
    val = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        val |= PPC4XX_TLBHI_V;
    }
    /* Map the byte size back to the 3-bit TLBHI size field; fall back to
     * the default encoding when it is out of range. */
    sz = booke_page_size_to_tlb(tlb->size);
    if (sz < PPC4XX_TLBHI_SIZE_MIN || sz > PPC4XX_TLBHI_SIZE_MAX) {
        sz = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    val |= sz << PPC4XX_TLBHI_SIZE_SHIFT;
    env->spr[SPR_40x_PID] = tlb->PID;
    return val;
}
3939

    
3940
/* tlbre (TLBLO word): reassemble the low word of a 4xx TLB entry (RPN
 * plus the EX/WR permission bits) from the soft-TLB state. */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong val;

    tlb = &env->tlb[entry & PPC4XX_TLB_ENTRY_MASK].tlbe;
    val = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        val |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        val |= PPC4XX_TLBLO_WR;
    }
    return val;
}
3956

    
3957
/* tlbwe (TLBHI word): update the EPN/size/valid state of a 4xx TLB entry.
 * Any pages covered by the old mapping, and by the new one if valid, are
 * flushed from the QEMU TLB. */
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    /* EPN is aligned down to the entry's page size */
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
}
4015

    
4016
/* tlbwe (TLBLO word): update the RPN, storage attributes and access
 * permissions of a 4xx TLB entry.  The permission set is rebuilt from
 * scratch: READ always, plus EXEC/WRITE as requested by the word. */
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    int prot;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    tlb = &env->tlb[entry & PPC4XX_TLB_ENTRY_MASK].tlbe;
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        prot |= PAGE_WRITE;
    }
    tlb->prot = prot;
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
4041

    
4042
/* tlbsx: search the soft TLB for an address using the current 40x PID. */
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
4046

    
4047
/* PowerPC 440 TLB management */
4048
/* tlbwe: write one 32-bit word (0..2) of a 440 TLB entry.
 * word 0: EPN, size, valid bit and attribute bit 0;
 * word 1: RPN;
 * word 2: storage attributes and user/supervisor permissions.
 * The QEMU TLB is flushed when an established mapping may have changed. */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        /* EPN / size / valid / attribute bit 0 */
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        /* The entry's PID comes from the low byte of MMUCR */
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        /* RPN */
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        /* Storage attributes + permissions: user permissions are kept
           shifted left by 4 within tlb->prot */
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}
4109

    
4110
/* tlbre: read back one 32-bit word (0..2) of a 440 TLB entry, the inverse
 * of helper_440_tlbwe().  Reading word 0 also deposits the entry's PID
 * into the low byte of MMUCR, as the hardware does. */
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        /* EPN / size / attribute bit 0 / valid */
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        /* RPN */
        ret = tlb->RPN;
        break;
    case 2:
        /* Storage attributes + permissions (user bits are stored shifted
           left by 4 in tlb->prot) */
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}
4155

    
4156
/* tlbsx: search the soft TLB for an address using the PID from MMUCR. */
target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
4160

    
4161
#endif /* !CONFIG_USER_ONLY */