Statistics
| Branch: | Revision:

root / target-ppc / op_helper.c @ 82b323cd

History | View | Annotate | Download (124.7 kB)

1
/*
2
 *  PowerPC emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2003-2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <string.h>
20
#include "exec.h"
21
#include "host-utils.h"
22
#include "helper.h"
23

    
24
#include "helper_regs.h"
25

    
26
//#define DEBUG_OP
27
//#define DEBUG_EXCEPTIONS
28
//#define DEBUG_SOFTWARE_TLB
29

    
30
#ifdef DEBUG_SOFTWARE_TLB
31
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
32
#else
33
#  define LOG_SWTLB(...) do { } while (0)
34
#endif
35

    
36

    
37
/*****************************************************************************/
38
/* Exceptions processing helpers */
39

    
40
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
41
{
42
#if 0
43
    printf("Raise exception %3x code : %d\n", exception, error_code);
44
#endif
45
    env->exception_index = exception;
46
    env->error_code = error_code;
47
    cpu_loop_exit();
48
}
49

    
50
void helper_raise_exception (uint32_t exception)
51
{
52
    helper_raise_exception_err(exception, 0);
53
}
54

    
55
/*****************************************************************************/
56
/* SPR accesses */
57
/* Debug aid: log a read access to SPR 'sprn' and the current value of
 * env->spr[sprn]. */
void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

/* Debug aid: log a write access to SPR 'sprn' and the current value of
 * env->spr[sprn]. */
void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}
68

    
69
/* Read the low 32 bits of the time base (TBL). */
target_ulong helper_load_tbl (void)
{
    return (target_ulong)cpu_ppc_load_tbl(env);
}

/* Read the high 32 bits of the time base (TBU). */
target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

/* Read the low part of the alternate time base (ATBL). */
target_ulong helper_load_atbl (void)
{
    return (target_ulong)cpu_ppc_load_atbl(env);
}

/* Read the high part of the alternate time base (ATBU). */
target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

/* Read the PowerPC 601 real-time clock, low register (RTCL). */
target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

/* Read the PowerPC 601 real-time clock, upper register (RTCU). */
target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
98

    
99
#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
/* Write the Address Space Register (64-bit MMU state). */
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

/* Write SDR1 (hashed page table base/size). */
void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

/* Write the low 32 bits of the time base. */
void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

/* Write the high 32 bits of the time base. */
void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

/* Write the low part of the alternate time base. */
void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

/* Write the high part of the alternate time base. */
void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

/* Write the PowerPC 601 real-time clock, low register. */
void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

/* Write the PowerPC 601 real-time clock, upper register. */
void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

/* Read the decrementer. */
target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

/* Write the decrementer. */
void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

/* Write HID0 on the PowerPC 601.  Bit 3 (0x8) selects little-endian mode;
 * when that bit changes, the cached MSR_LE hflag is resynchronized. */
void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
                 val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

/* Write PowerPC 403 protection-bound register 'num'; a change invalidates
 * cached translations, hence the (coarse) full TLB flush. */
void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

/* Read the 40x programmable interval timer. */
target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

/* Write the 40x programmable interval timer. */
void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

/* Write the 40x debug control register 0. */
void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

/* Write the 40x storage little-endian register. */
void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

/* Write the BookE timer control register. */
void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

/* Write the BookE timer status register. */
void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

/* Write instruction/data BAT registers (classic 6xx-style MMU). */
void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

/* The 601 has unified BATs, hence the IBAT store routines. */
void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif
238

    
239
/*****************************************************************************/
240
/* Memory load and stores */
241

    
242
/* Advance an effective address by 'arg', truncating the result to 32 bits
 * when a 64-bit CPU runs in 32-bit mode (MSR[SF] clear). */
static inline target_ulong addr_add(target_ulong addr, target_long arg)
{
    target_ulong ea = addr + arg;

#if defined(TARGET_PPC64)
    if (!msr_sf) {
        ea = (uint32_t)ea;
    }
#endif
    return ea;
}
251

    
252
/* lmw: load words from 'addr' into GPRs reg..31, 4 bytes apart.  In
 * little-endian mode each word is byte-reversed after loading. */
void helper_lmw (target_ulong addr, uint32_t reg)
{
    uint32_t r;

    for (r = reg; r < 32; r++) {
        uint32_t word = ldl(addr);
        env->gpr[r] = msr_le ? bswap32(word) : word;
        addr = addr_add(addr, 4);
    }
}
262

    
263
/* stmw: store the low 32 bits of GPRs reg..31 to memory starting at
 * 'addr', byte-reversing each word in little-endian mode. */
void helper_stmw (target_ulong addr, uint32_t reg)
{
    uint32_t r;

    for (r = reg; r < 32; r++) {
        uint32_t word = (uint32_t)env->gpr[r];
        stl(addr, msr_le ? bswap32(word) : word);
        addr = addr_add(addr, 4);
    }
}
273

    
274
/* lsw: load 'nb' bytes starting at 'addr' into successive GPRs beginning
 * with 'reg' (wrapping from r31 to r0).  A trailing partial word is loaded
 * left-justified with the unused low-order bytes cleared. */
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int shift;

    while (nb > 3) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (shift = 24; nb > 0; nb--, shift -= 8) {
            env->gpr[reg] |= ldub(addr) << shift;
            addr = addr_add(addr, 1);
        }
    }
}
290
/* The PPC32 specification says we must generate an exception if rA is in
 * the range of registers to be loaded.  IBM documentation, on the other
 * hand, says this is valid, but rA simply won't be loaded.
 * For now, follow the specification and raise the exception...
 */
/* lswx: load string word indexed; the byte count comes from XER[BC].
 * NOTE(review): wrap-around of the destination range past r31 is not
 * considered by the overlap test — confirm against the spec. */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
308

    
309
/* stsw: store 'nb' bytes from successive GPRs beginning with 'reg'
 * (wrapping from r31 to r0), taking the bytes of a trailing partial word
 * from the most-significant end of the register. */
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int shift;

    while (nb > 3) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    if (unlikely(nb > 0)) {
        for (shift = 24; nb > 0; nb--, shift -= 8) {
            stb(addr, (env->gpr[reg] >> shift) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}
324

    
325
/* Zero the cache line of 'dcache_line_size' bytes containing 'addr', and
 * kill any pending lwarx/ldarx reservation on that line. */
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    addr &= ~(dcache_line_size - 1);
    int i;
    for (i = 0 ; i < dcache_line_size ; i += 4) {
        stl(addr + i , 0);
    }
    if (env->reserve_addr == addr)
        env->reserve_addr = (target_ulong)-1ULL;
}

/* dcbz: zero a data cache line using the CPU's configured line size. */
void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

/* dcbz on the PowerPC 970: HID5 field at bits 7-8 equal to 1 selects a
 * 32-byte line, otherwise the regular line size is used. */
void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}

/* icbi: invalidate translated code for the cache line containing 'addr'.
 * NOTE(review): the address is aligned on the *data* cache line size but
 * the invalidated range uses icache_line_size — confirm the geometry. */
void helper_icbi(target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}
360

    
361
// XXX: to be tested
/* lscbx (601): load string and compare byte indexed.  Loads up to XER[BC]
 * bytes into successive GPRs (four per register, packed from the high
 * byte down), stopping early when a byte equals XER[CMP].  rA (if
 * non-zero) and rB are never overwritten.
 * NOTE(review): on a compare match the returned loop index is one less
 * than the number of bytes consumed — confirm against the 601 manual. */
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;
    d = 24;  /* shift position of the next byte within the current word */
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            /* Word filled: move on to the next register (mod 32) */
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
385

    
386
/*****************************************************************************/
387
/* Fixed point operations helpers */
388
#if defined(TARGET_PPC64)
389

    
390
/* multiply high word */
391
/* mulhd: return the high 64 bits of the 128-bit signed product. */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* mulhdu: return the high 64 bits of the 128-bit unsigned product. */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}
407

    
408
/* mulldo: 64x64 -> low 64 bits of the signed product, setting XER[OV]
 * (and the sticky XER[SO]) when the product does not fit in 64 bits. */
uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    /* The product fits in 64 bits iff the high word is the sign extension
     * of the low word.  The previous check (th == 0 || th == -1) missed
     * overflows such as products in [2^63, 2^64) where th == 0 but the
     * result is negative when interpreted as a signed doubleword. */
    if (likely(th == (uint64_t)((int64_t)tl >> 63))) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return tl;
}
422
#endif
423

    
424
/* cntlzw: count leading zeros of the 32-bit operand (result 0..32). */
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
/* cntlzd: count leading zeros of the 64-bit operand (result 0..64). */
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif
435

    
436
/* shift right arithmetic helper */
437
target_ulong helper_sraw (target_ulong value, target_ulong shift)
438
{
439
    int32_t ret;
440

    
441
    if (likely(!(shift & 0x20))) {
442
        if (likely((uint32_t)shift != 0)) {
443
            shift &= 0x1f;
444
            ret = (int32_t)value >> shift;
445
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
446
                env->xer &= ~(1 << XER_CA);
447
            } else {
448
                env->xer |= (1 << XER_CA);
449
            }
450
        } else {
451
            ret = (int32_t)value;
452
            env->xer &= ~(1 << XER_CA);
453
        }
454
    } else {
455
        ret = (int32_t)value >> 31;
456
        if (ret) {
457
            env->xer |= (1 << XER_CA);
458
        } else {
459
            env->xer &= ~(1 << XER_CA);
460
        }
461
    }
462
    return (target_long)ret;
463
}
464

    
465
#if defined(TARGET_PPC64)
466
/* srad: 64-bit shift right algebraic.  XER[CA] is set when the source is
 * negative and any 1 bits are shifted out; a shift amount of 64..127
 * (bit 0x40 set) yields a pure sign fill. */
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* 64-bit constant: "1 << shift" with shift up to 63 is
             * undefined behaviour on a 32-bit int and computed the wrong
             * carry mask for shifts >= 31. */
            if (likely(ret >= 0 ||
                       (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        /* Shift amount >= 64: result is the sign bit replicated */
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
493
#endif
494

    
495
/* popcntb: per-byte population count — each byte of the result holds the
 * number of 1 bits in the corresponding byte of 'val' (32-bit form). */
target_ulong helper_popcntb (target_ulong val)
{
    /* SWAR reduction: pair sums, then nibble sums, then byte sums */
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
/* popcntb, 64-bit form: per-byte population count over the doubleword. */
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif
512

    
513
/*****************************************************************************/
514
/* Floating point operations helpers */
515
/* Widen a single-precision bit pattern to double precision, using the
 * softfloat status in env->fp_status. */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

/* Narrow a double-precision bit pattern to single precision, using the
 * softfloat status in env->fp_status. */
uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}
532

    
533
/* Return non-zero when 'd' has an all-zero biased exponent, i.e. it is
 * denormalized (this also matches zero; callers test for zero first). */
static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;
    /* IEEE double: the exponent is the 11 bits below the sign bit */
    return !((u.ll >> 52) & 0x7FF);
}
541

    
542
/* Classify 'arg' (a double-precision bit pattern) into the 5-bit FPRF
 * encoding (class bit C plus FPCC).  When 'set_fprf' is non-zero,
 * FPSCR[FPRF] is updated as well.  Returns the low 4 bits (FPCC), which
 * the caller uses to update CR1. */
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    /* NOTE(review): relies on float64_is_quiet_nan() matching any NaN in
     * this softfloat version; the signaling case is separated below */
    if (unlikely(float64_is_quiet_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
593

    
594
/* Floating-point invalid operations exception */
/* Record an invalid-operation exception of type 'op' in the FPSCR, update
 * the VX/FX/FEX summary bits, and raise a program exception when the
 * corresponding enable bit and an MSR FE bit are set.  Returns the value
 * to place in the target FPR (the default quiet NaN when the exception is
 * disabled and the operation produces a result; 0 otherwise). */
static inline uint64_t fload_invalid_op_excp(int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
675

    
676
/* Record a zero-divide exception: set FPSCR[ZX]/[FX], clear FR/FI, and
 * raise a program exception immediately when ZE and an MSR FE bit are set. */
static inline void float_zero_divide_excp(void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

/* Record an overflow exception: set FPSCR[OX]/[FX].  When enabled, the
 * program exception is deferred (the target FPR must be written first);
 * otherwise the inexact bits XX/FI are set. */
static inline void float_overflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

/* Record an underflow exception: set FPSCR[UX]/[FX]; when enabled the
 * program exception is deferred until after the target FPR update. */
static inline void float_underflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

/* Record an inexact result: set FPSCR[XX]/[FX]; when enabled the program
 * exception is deferred until after the target FPR update. */
static inline void float_inexact_excp(void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
738

    
739
/* Propagate the FPSCR[RN] rounding-mode field into the softfloat status. */
static inline void fpscr_set_rounding_mode(void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinite */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinite */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
765

    
766
/* Clear bit 'bit' in the FPSCR.  If a rounding-mode bit (RN/RN1) was
 * actually cleared, resynchronize the softfloat rounding mode. */
void helper_fpscr_clrbit (uint32_t bit)
{
    int was_set = (env->fpscr >> bit) & 1;

    env->fpscr &= ~(1 << bit);
    if (!was_set) {
        return;
    }
    switch (bit) {
    case FPSCR_RN1:
    case FPSCR_RN:
        fpscr_set_rounding_mode();
        break;
    default:
        break;
    }
}
783

    
784
/* Set bit 'bit' in the FPSCR and apply its side effects: status bits
 * update the FX/VX/FEX summaries and may raise a deferred program
 * exception when the matching enable bit is set; enable bits may trigger
 * an exception for already-pending status; RN/RN1 resynchronize the
 * softfloat rounding mode. */
void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            /* Bug fix: a missing break here fell through into the OX case
             * and could raise a spurious overflow exception. */
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
898

    
899
/* FPSCR field store: for every set bit i of 'mask', replace FPSCR nibble
 * i with the corresponding nibble of 'arg' (only the 32 LSB of the
 * incoming FPR are used).  The FEX and VX summary bits (0x60000000) are
 * never written directly; they are recomputed afterwards, flagging a
 * deferred program exception when an enabled exception is pending. */
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    uint32_t incoming;
    int nibble;

    incoming = (uint32_t)arg;
    /* Preserve the current FEX/VX summary bits in the incoming value */
    incoming = (incoming & ~0x60000000) | (env->fpscr & 0x60000000);
    for (nibble = 0; nibble < 8; nibble++) {
        uint32_t field = 0xF << (4 * nibble);
        if (mask & (1 << nibble)) {
            env->fpscr = (env->fpscr & ~field) | (incoming & field);
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode();
}
932

    
933
/* Post-operation FP status check: raise any deferred program exception
 * recorded while the target FPR was being updated and, with softfloat,
 * fold the accumulated softfloat flags into the FPSCR. */
void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}
962

    
963
#ifdef CONFIG_SOFTFLOAT
964
void helper_reset_fpstatus (void)
965
{
966
    set_float_exception_flags(0, &env->fp_status);
967
}
968
#endif
969

    
970
/* fadd - fadd. */
971
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    /* Double-precision add with PowerPC invalid-operation detection.
     * Returns the raw 64-bit result image. */
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                      float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities: inf + (-inf) is invalid */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
992

    
993
/* fsub - fsub. */
994
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    /* Double-precision subtract with PowerPC invalid-operation detection. */
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN subtraction */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                      float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities: inf - inf is invalid */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1015

    
1016
/* fmul - fmul. */
1017
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    /* Double-precision multiply with PowerPC invalid-operation detection. */
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1038

    
1039
/* fdiv - fdiv. */
1040
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    /* Double-precision divide with PowerPC invalid-operation detection. */
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        /* Divide-by-zero of a nonzero dividend is handled by the softfloat
         * status flags, not as an invalid operation. */
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1063

    
1064
/* fabs */
1065
uint64_t helper_fabs (uint64_t arg)
{
    /* Clear the sign bit; fabs raises no FP exceptions. */
    CPU_DoubleU u;

    u.ll = arg;
    u.d = float64_abs(u.d);
    return u.ll;
}
1073

    
1074
/* fnabs */
1075
uint64_t helper_fnabs (uint64_t arg)
{
    /* Force the sign bit set: negate the absolute value.
     * fnabs raises no FP exceptions. */
    CPU_DoubleU u;

    u.ll = arg;
    u.d = float64_chs(float64_abs(u.d));
    return u.ll;
}
1084

    
1085
/* fneg */
1086
uint64_t helper_fneg (uint64_t arg)
{
    /* Flip the sign bit; fneg raises no FP exceptions. */
    CPU_DoubleU u;

    u.ll = arg;
    u.d = float64_chs(u.d);
    return u.ll;
}
1094

    
1095
/* fctiw - fctiw. */
1096
uint64_t helper_fctiw (uint64_t arg)
{
    /* Convert double to 32-bit signed integer using the FPSCR rounding
     * mode; invalid inputs signal VXCVI (plus VXSNAN for sNaN). */
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}
1116

    
1117
/* fctiwz - fctiwz. */
1118
uint64_t helper_fctiwz (uint64_t arg)
{
    /* Convert double to 32-bit signed integer with round-toward-zero,
     * regardless of the FPSCR rounding mode. */
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}
1138

    
1139
#if defined(TARGET_PPC64)
1140
/* fcfid - fcfid. */
1141
uint64_t helper_fcfid (uint64_t arg)
{
    /* Convert a signed 64-bit integer to double precision, honouring the
     * FPSCR rounding mode via env->fp_status. */
    CPU_DoubleU res;

    res.d = int64_to_float64(arg, &env->fp_status);
    return res.ll;
}
1147

    
1148
/* fctid - fctid. */
1149
uint64_t helper_fctid (uint64_t arg)
{
    /* Convert double to 64-bit signed integer using the FPSCR rounding
     * mode; invalid inputs signal VXCVI (plus VXSNAN for sNaN). */
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1165

    
1166
/* fctidz - fctidz. */
1167
uint64_t helper_fctidz (uint64_t arg)
{
    /* Convert double to 64-bit signed integer with round-toward-zero,
     * regardless of the FPSCR rounding mode. */
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1183

    
1184
#endif
1185

    
1186
/* Common implementation of the frin/friz/frip/frim round-to-integer
 * instructions: round the double to an integral value using the given
 * softfloat rounding mode, then restore the FPSCR-selected mode. */
static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}
1205

    
1206
uint64_t helper_frin (uint64_t arg)
{
    /* frin: round to integral, ties to even */
    return do_fri(arg, float_round_nearest_even);
}

uint64_t helper_friz (uint64_t arg)
{
    /* friz: round to integral toward zero */
    return do_fri(arg, float_round_to_zero);
}

uint64_t helper_frip (uint64_t arg)
{
    /* frip: round to integral toward +infinity */
    return do_fri(arg, float_round_up);
}

uint64_t helper_frim (uint64_t arg)
{
    /* frim: round to integral toward -infinity */
    return do_fri(arg, float_round_down);
}
1225

    
1226
/* fmadd - fmadd. */
1227
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    /* Fused multiply-add: (arg1 * arg2) + arg3 with a single rounding.
     * With FLOAT128 the intermediate product is kept in quad precision,
     * as the architecture requires; otherwise a plain double expression
     * is used (double-rounds on non-x87 hosts). */
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }

    return farg1.ll;
}
1269

    
1270
/* fmsub - fmsub. */
1271
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    /* Fused multiply-subtract: (arg1 * arg2) - arg3 with a single
     * rounding (quad-precision intermediate when FLOAT128 is available). */
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
    return farg1.ll;
}
1312

    
1313
/* fnmadd - fnmadd. */
1314
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    /* Negated fused multiply-add: -((arg1 * arg2) + arg3).
     * The final negation is skipped for NaN results (NaN sign is
     * unchanged, per the architecture). */
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
        /* Do not negate NaN results */
        if (likely(!float64_is_quiet_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
1357

    
1358
/* fnmsub - fnmsub. */
1359
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    /* Negated fused multiply-subtract: -((arg1 * arg2) - arg3).
     * The final negation is skipped for NaN results. */
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
        /* Do not negate NaN results */
        if (likely(!float64_is_quiet_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
1402

    
1403
/* frsp - frsp. */
1404
uint64_t helper_frsp (uint64_t arg)
{
    /* Round a double-precision value to single precision (result is
     * still stored in double format). */
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN rounding to single precision */
       farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
       f32 = float64_to_float32(farg.d, &env->fp_status);
       farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
1419

    
1420
/* fsqrt - fsqrt. */
1421
uint64_t helper_fsqrt (uint64_t arg)
{
    /* Double-precision square root; negative nonzero inputs signal
     * VXSQRT (sqrt(-0) is -0 and is allowed through). */
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1437

    
1438
/* fre - fre. */
1439
uint64_t helper_fre (uint64_t arg)
{
    /* Reciprocal estimate: implemented as an exact 1.0 / arg divide.
     * Returns the raw 64-bit image of the result. */
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }
    /* Bug fix: return the bit pattern (farg.ll), not farg.d.  Returning
     * the float64 value caused an implicit float-to-integer conversion
     * of the result instead of returning its raw image. */
    return farg.ll;
}
1452

    
1453
/* fres - fres. */
1454
uint64_t helper_fres (uint64_t arg)
{
    /* Single-precision reciprocal estimate: exact divide, then round the
     * result through single precision. */
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
1470

    
1471
/* frsqrte  - frsqrte. */
1472
uint64_t helper_frsqrte (uint64_t arg)
{
    /* Reciprocal square root estimate: exact sqrt + divide, rounded
     * through single precision. */
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
1492

    
1493
/* fsel - fsel. */
1494
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    /* Floating select: return arg2 when arg1 >= 0 and is not a NaN,
     * otherwise return arg3.  No FP exceptions are raised. */
    CPU_DoubleU test;

    test.ll = arg1;

    if (float64_is_quiet_nan(test.d)) {
        return arg3;
    }
    if (float64_is_neg(test.d) && !float64_is_zero(test.d)) {
        /* Strictly negative selector */
        return arg3;
    }
    return arg2;
}
1505

    
1506
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    /* Unordered floating compare: set FPCC in FPSCR and CR[crfD] to one
     * of 0x8 (lt), 0x4 (gt), 0x2 (eq), 0x1 (unordered). */
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    /* NOTE(review): relies on float64_is_quiet_nan() covering the NaN
     * operands that reach the unordered path — confirm it also matches
     * signaling NaNs so the VXSNAN check below can ever fire. */
    if (unlikely(float64_is_quiet_nan(farg1.d) ||
                 float64_is_quiet_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}
1534

    
1535
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    /* Ordered floating compare: like fcmpu, but any NaN operand also
     * signals VXVC (invalid compare), with VXSNAN added for sNaN. */
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_quiet_nan(farg1.d) ||
                 float64_is_quiet_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
1568

    
1569
#if !defined (CONFIG_USER_ONLY)
1570
void helper_store_msr (target_ulong val)
{
    /* Write the MSR via hreg_store_msr().  NOTE(review): a non-zero
     * return value appears to be an exception number the MSR change
     * requires us to raise — confirm against hreg_store_msr()'s
     * contract in helper_regs.h. */
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        /* Force an exit from the current TB so the new MSR takes effect */
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}
1578

    
1579
/* Common return-from-interrupt tail: load NIP and MSR from the given
 * save/restore values.  msrm masks which MSR bits may be restored;
 * keep_msrh preserves the upper 32 MSR bits when returning to 32-bit
 * mode on a 64-bit CPU. */
static inline void do_rfi(target_ulong nip, target_ulong msr,
                          target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
1607

    
1608
void helper_rfi (void)
{
    /* rfi: return from interrupt using SRR0/SRR1 */
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 1);
}
1613

    
1614
#if defined(TARGET_PPC64)
1615
void helper_rfid (void)
{
    /* rfid: 64-bit return from interrupt using SRR0/SRR1 */
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 0);
}
1620

    
1621
void helper_hrfid (void)
{
    /* hrfid: hypervisor return from interrupt using HSRR0/HSRR1 */
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0x783F0000), 0);
}
1626
#endif
1627
#endif
1628

    
1629
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    /* Trap word: raise a trap program exception when any comparison
     * enabled by the TO field (flags) holds between arg1 and arg2. */
    int32_t sa = (int32_t)arg1;
    int32_t sb = (int32_t)arg2;
    uint32_t ua = (uint32_t)arg1;
    uint32_t ub = (uint32_t)arg2;

    if (unlikely((sa < sb && (flags & 0x10)) ||
                 (sa > sb && (flags & 0x08)) ||
                 (sa == sb && (flags & 0x04)) ||
                 (ua < ub && (flags & 0x02)) ||
                 (ua > ub && (flags & 0x01)))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1639

    
1640
#if defined(TARGET_PPC64)
1641
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    /* Trap doubleword: 64-bit variant of helper_tw. */
    int64_t sa = (int64_t)arg1;
    int64_t sb = (int64_t)arg2;
    uint64_t ua = (uint64_t)arg1;
    uint64_t ub = (uint64_t)arg2;

    if (unlikely((sa < sb && (flags & 0x10)) ||
                 (sa > sb && (flags & 0x08)) ||
                 (sa == sb && (flags & 0x04)) ||
                 (ua < ub && (flags & 0x02)) ||
                 (ua > ub && (flags & 0x01)))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1650
#endif
1651

    
1652
/*****************************************************************************/
1653
/* PowerPC 601 specific instructions (POWER bridge) */
1654

    
1655
target_ulong helper_clcs (uint32_t arg)
{
    /* POWER (601 bridge) cache-line-compute-size: return the cache line
     * size selected by arg; undefined selectors return 0. */
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
    default:
        /* Undefined */
        return 0;
    }
}
1682

    
1683
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    /* POWER div: divide the 64-bit value arg1:MQ by 32-bit arg2;
     * quotient is returned, remainder goes to MQ.  On overflow or
     * divide-by-zero the result saturates to INT32_MIN and MQ is 0. */
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        /* NOTE(review): the modulo mixes unsigned tmp with target_ulong
         * arg2 while the quotient casts arg2 to int32_t — confirm this
         * matches the intended 601 bridge semantics for negative values. */
        env->spr[SPR_MQ] = tmp % arg2;
        return  tmp / (int32_t)arg2;
    }
}
1696

    
1697
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    /* POWER div with overflow: like helper_div, but also sets XER[OV,SO]
     * when the quotient does not fit in 32 bits, on overflow, or on
     * divide-by-zero. */
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        /* NOTE(review): same signed/unsigned mixing as helper_div —
         * verify against 601 bridge semantics. */
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            /* Quotient does not fit in 32 bits */
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}
1717

    
1718
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    /* POWER divs: 32-bit signed divide; remainder goes to MQ.  On
     * overflow or divide-by-zero the result saturates to INT32_MIN
     * and MQ is cleared. */
    int32_t dividend = (int32_t)arg1;
    int32_t divisor = (int32_t)arg2;

    if (divisor == 0 ||
        (dividend == INT32_MIN && divisor == (int32_t)-1)) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->spr[SPR_MQ] = dividend % divisor;
    return dividend / divisor;
}
1729

    
1730
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    /* POWER divs with overflow: like helper_divs, but XER[OV,SO] are set
     * on overflow / divide-by-zero and OV is cleared otherwise. */
    int32_t dividend = (int32_t)arg1;
    int32_t divisor = (int32_t)arg2;

    if (divisor == 0 ||
        (dividend == INT32_MIN && divisor == (int32_t)-1)) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->xer &= ~(1 << XER_OV);
    env->spr[SPR_MQ] = dividend % divisor;
    return dividend / divisor;
}
1743

    
1744
#if !defined (CONFIG_USER_ONLY)
1745
target_ulong helper_rac (target_ulong addr)
{
    /* POWER rac: translate an effective address to a real address,
     * returning 0 on translation failure. */
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}
1762

    
1763
void helper_rfsvc (void)
{
    /* POWER rfsvc: return from supervisor call; NIP from LR, MSR from CTR
     * (only the low 16 MSR bits are restored). */
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
1767
#endif
1768

    
1769
/*****************************************************************************/
1770
/* 602 specific instructions */
1771
/* mfrom is the most crazy instruction ever seen, imho ! */
1772
/* Real implementation uses a ROM table. Do the same */
1773
/* Extremly decomposed:
1774
 *                      -arg / 256
1775
 * return 256 * log10(10           + 1.0) + 0.5
1776
 */
1777
#if !defined (CONFIG_USER_ONLY)
1778
target_ulong helper_602_mfrom (target_ulong arg)
{
    /* 602 mfrom: table lookup of 256*log10(10^(-arg/256)+1.0)+0.5,
     * mirroring the hardware ROM; out-of-range inputs yield 0. */
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
1787
#endif
1788

    
1789
/*****************************************************************************/
1790
/* Embedded PowerPC specific helpers */
1791

    
1792
/* XXX: to be improved to check access rights when in user-mode */
1793
target_ulong helper_load_dcr (target_ulong dcrn)
{
    /* Read device control register dcrn.  Raises a program exception if
     * there is no DCR environment or the DCR read fails. */
    uint32_t val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}
1808

    
1809
void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    /* Write device control register dcrn.  Raises a program exception if
     * there is no DCR environment or the DCR write fails. */
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
1821

    
1822
#if !defined(CONFIG_USER_ONLY)
1823
void helper_40x_rfci (void)
{
    /* 40x rfci: return from critical interrupt using SRR2/SRR3 */
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}
1828

    
1829
void helper_rfci (void)
1830
{
1831
    do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1832
           ~((target_ulong)0x3FFF0000), 0);
1833
}
1834

    
1835
void helper_rfdi (void)
1836
{
1837
    do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1838
           ~((target_ulong)0x3FFF0000), 0);
1839
}
1840

    
1841
void helper_rfmci (void)
1842
{
1843
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1844
           ~((target_ulong)0x3FFF0000), 0);
1845
}
1846
#endif
1847

    
1848
/* 440 specific */
1849
/* 440 specific */
/* dlmzb: Determine Leftmost Zero Byte in the 8-byte string formed by
 * rA(high) || rB(low).  Returns (and stores into XER[57:63]) the number
 * of bytes up to and including the first zero byte, or 8 when no zero
 * byte is present.  When update_Rc, CR0 is set to 0x4/0x8/0x2 according
 * to whether the zero byte lies in the high word, the low word, or is
 * absent, with SO ORed in from XER.
 * Bug fix: the original fell through with i == 9 when no zero byte was
 * found; the architected result is 8.  */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    /* Scan the high word, most-significant byte first.  */
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    /* Then the low word.  */
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    /* No zero byte found: the result is 8 (i would otherwise be 9).  */
    i = 8;
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
1883

    
1884
/*****************************************************************************/
/* Altivec extension helpers */
/* HI_IDX/LO_IDX map the architectural "high"/"low" half of a vector word
 * pair onto the host's array layout.  */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/* Iterate over the elements of vector register r in architectural
 * (big-endian) order, regardless of host byte order.  On little-endian
 * hosts that means walking the array backwards.  NOTE: relies on a
 * variable named 'r' being in scope at the expansion site.  */
#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  (The trailing 'else' chains onto the
 * statement that follows the macro invocation.)  */
#define DO_HANDLE_NAN(result, x)                \
    if (float32_is_any_nan(x)) {                                \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

/* NaN-propagation guards for 1-, 2- and 3-operand float operations: the
 * guarded block runs only when none of the inputs is a NaN.  */
#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
1918

    
1919
/* Saturating arithmetic helpers.  */
/* SATCVT(from, to, ...) defines cvt<from><to>(): narrow a wider signed
 * value to [min, max] of the destination type, setting *sat when the
 * value had to be clamped (callers use that to raise VSCR[SAT]).  */
#define SATCVT(from, to, from_type, to_type, min, max)                  \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x < (from_type)min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (x > (from_type)max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
/* SATCVTU: unsigned source, so only the upper bound can be exceeded.  */
#define SATCVTU(from, to, from_type, to_type, min, max)                 \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x > (from_type)max) {                                       \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
/* Signed source clamped to an unsigned destination: negatives become 0.  */
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU
1959

    
1960
/* lvebx/lvehx/lvewx: load a single element from memory into the vector
 * register slot selected by the low 4 bits of the address.  When the
 * guest is little-endian (msr_le) the loaded value is byte-swapped.
 * Note: sizeof(elem) >> 1 happens to equal log2(sizeof(elem)) for the
 * 1/2/4-byte elements used here.  */
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if(msr_le) {                                            \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
#define I(x) (x)   /* identity "swap" for byte loads */
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE
1979

    
1980
/* lvsl: build the left-shift permute control vector — bytes hold the
 * ascending sequence starting at addr & 0xf, in architectural element
 * order (VECTOR_FOR_INORDER_I walks elements big-endian-first, so the
 * post-increment below fills them in that order).  */
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int pos = sh & 0xf;

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = pos++;
    }
}
1988

    
1989
/* lvsr: build the right-shift permute control vector — the byte sequence
 * starts at 0x10 - (addr & 0xf), filled in architectural element order
 * exactly as in helper_lvsl.  */
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int pos = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = pos++;
    }
}
1997

    
1998
#define STVE(name, access, swap, element)                       \
1999
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
2000
    {                                                           \
2001
        size_t n_elems = ARRAY_SIZE(r->element);                \
2002
        int adjust = HI_IDX*(n_elems-1);                        \
2003
        int sh = sizeof(r->element[0]) >> 1;                    \
2004
        int index = (addr & 0xf) >> sh;                         \
2005
        if(msr_le) {                                            \
2006
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2007
        } else {                                                        \
2008
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2009
        }                                                               \
2010
    }
2011
#define I(x) (x)
2012
STVE(stvebx, stb, I, u8)
2013
STVE(stvehx, stw, bswap16, u16)
2014
STVE(stvewx, stl, bswap32, u32)
2015
#undef I
2016
#undef LVE
2017

    
2018
/* mtvscr: copy the architectural low word of the source vector into the
 * VSCR, then propagate the non-Java (NJ) bit into the softfloat status as
 * flush-to-zero mode.  The word index differs by host byte order because
 * ppc_avr_t is stored in host layout.  */
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}
2027

    
2028
/* vaddcuw: per-word carry-out of the unsigned 32-bit addition a + b —
 * each result word is 1 when the add would overflow 32 bits, else 0.
 * (Equivalent to the classic ~a < b carry test.)  */
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t sum = (uint64_t)a->u32[i] + (uint64_t)b->u32[i];
        r->u32[i] = (uint32_t)(sum >> 32);
    }
}
2035

    
2036
/* Element-wise modulo integer add/sub: vaddubm/vadduhm/vadduwm and the
 * matching vsub*m forms (wrap-around, no saturation).  */
#define VARITH_DO(name, op, element)        \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
{                                                                       \
    int i;                                                              \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
        r->element[i] = a->element[i] op b->element[i];                 \
    }                                                                   \
}
#define VARITH(suffix, element)                  \
  VARITH_DO(add##suffix, +, element)             \
  VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH

/* Element-wise float add/sub (vaddfp/vsubfp); NaN inputs are quieted and
 * propagated by HANDLE_NAN2 instead of being fed to the operation.  */
#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP
2066

    
2067
/* One element of a saturating add/sub: compute in the wider 'type', then
 * clamp back with the matching cvt* helper (which records saturation).  */
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

/* Saturating element-wise add/sub; sets VSCR[SAT] if any element
 * saturated.  The three switch arms expand identically — the switch only
 * documents the element width, selection is done by 'element'/'cvt'.  */
#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)    \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)       \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)     \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
2105

    
2106
/* vavgs*/vavgu*: element-wise rounded average, computed in the wider
 * 'etype' so the +1 rounding bias and the sum cannot overflow.  */
#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG

/* vcfux/vcfsx: convert each (un)signed word to float, then divide by
 * 2**uim via float32_scalbn with a negative exponent.  */
#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn (t, -uim, &env->vec_status);       \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF
2137

    
2138
/* Integer vector compares: each element becomes all-ones when the
 * comparison holds, 0 otherwise.  The "_dot" (record) forms also set
 * CR6: bit 3 = all elements true, bit 1 = all elements false.  */
#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
            switch (sizeof (a->element[0])) {                           \
            case 4: r->u32[i] = result; break;                          \
            case 2: r->u16[i] = result; break;                          \
            case 1: r->u8[i] = result; break;                           \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP
2173

    
2174
/* Float vector compares.  Unordered (NaN) comparisons yield false for
 * every predicate; otherwise the softfloat relation is tested against
 * 'order' with 'compare'.  Note vcmpgefp is expressed as
 * "rel != float_relation_less", i.e. greater-or-equal once unordered has
 * been excluded.  The "_dot" forms set CR6 as in VCMP_DO.  */
#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            uint32_t result;                                            \
            int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)           \
    VCMPFP_DO(suffix, compare, order, 0)         \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP
2207

    
2208
/* vcmpbfp core: bounds compare of a against [-b, b].  For each element,
 * result bit 31 = (a > b), bit 30 = (a < -b); NaN operands set both bits.
 * When 'record', CR6 bit 1 is set iff every element was in bounds.  */
static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                                    int record)
{
    int i;
    int all_in = 0;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here.  */
        } else {
            /* a >= -b is tested by comparing a against the negated bound.  */
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;
            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}
2231

    
2232
/* vcmpbfp (non-record form): bounds compare without touching CR6.  */
void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 0);
}
2236

    
2237
/* vcmpbfp. (record form): bounds compare, also updating CR6.  */
void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 1);
}
2241

    
2242
/* vctuxs/vctsxs: convert each float to a saturated (un)signed word after
 * multiplying by 2**uim.  Works in float64/int64 with round-toward-zero
 * on a local status copy, then saturates via satcvt; NaNs convert to 0.
 * VSCR[SAT] is set if any element saturated.  */
#define VCT(suffix, satcvt, element)                                    \
    void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(float_round_to_zero, &s);               \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            if (float32_is_any_nan(b->f[i])) {                          \
                r->element[i] = 0;                                      \
            } else {                                                    \
                float64 t = float32_to_float64(b->f[i], &s);            \
                int64_t j;                                              \
                t = float64_scalbn(t, uim, &s);                         \
                j = float64_to_int64(t, &s);                            \
                r->element[i] = satcvt(j, &sat);                        \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
2267

    
2268
/* vmaddfp: element-wise fused-style a*c + b.  The inputs are widened to
 * double so the multiply-add rounds only once, at the final narrowing;
 * NaN inputs are quieted and propagated by HANDLE_NAN3.  */
void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            float64 wa = float32_to_float64(a->f[i], &env->vec_status);
            float64 wb = float32_to_float64(b->f[i], &env->vec_status);
            float64 wc = float32_to_float64(c->f[i], &env->vec_status);
            float64 acc = float64_mul(wa, wc, &env->vec_status);
            acc = float64_add(acc, wb, &env->vec_status);
            r->f[i] = float64_to_float32(acc, &env->vec_status);
        }
    }
}
2285

    
2286
/* vmhaddshs: per halfword, (a*b) >> 15 plus c, saturated to int16.
 * Sets VSCR[SAT] if any element saturated.  */
void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i, sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t product = a->s16[i] * b->s16[i];
        int32_t sum = (int32_t)c->s16[i] + (product >> 15);
        r->s16[i] = cvtswsh (sum, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2301

    
2302
/* vmhraddshs: like vmhaddshs but with 0x4000 added to the product before
 * the >> 15, i.e. round-to-nearest instead of truncation.  */
void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i, sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t product = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t sum = (int32_t)c->s16[i] + (product >> 15);
        r->s16[i] = cvtswsh (sum, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2317

    
2318
/* vmin*/vmax*: element-wise minimum/maximum.  The result is b when
 * "a compare b" holds, else a — so min passes '>' and max passes '<'.  */
#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
  VMINMAX_DO(min##suffix, >, element)           \
  VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX
2341

    
2342
/* vminfp/vmaxfp: element-wise float min/max.  rT is taken when a < b
 * (quiet compare), rF otherwise; the argument order of the two
 * instantiations selects min vs max.  NaN inputs are handled (quieted
 * and propagated) by HANDLE_NAN2 before the compare.  */
#define VMINMAXFP(suffix, rT, rF)                                       \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
                    r->f[i] = rT->f[i];                                 \
                } else {                                                \
                    r->f[i] = rF->f[i];                                 \
                }                                                       \
            }                                                           \
        }                                                               \
    }
VMINMAXFP(minfp, a, b)
VMINMAXFP(maxfp, b, a)
#undef VMINMAXFP
2359

    
2360
/* vmladduhm: per halfword, low 16 bits of a*b + c (modulo arithmetic,
 * no saturation).  */
void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        r->s16[i] = (int16_t) (a->s16[i] * b->s16[i] + c->s16[i]);
    }
}
2368

    
2369
/* vmrgh*/vmrgl*: interleave elements from the high or low half of a and
 * b.  Built into a temporary so r may alias a or b.
 * NOTE(review): the MRGHI/MRGLO values and the highp argument look
 * cross-wired (mrgl is instantiated with MRGHI), but the two VMRG_DO
 * branches index from opposite ends, so the pairing appears deliberate —
 * verify against the AltiVec vmrgh/vmrgl definitions before changing.  */
#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        for (i = 0; i < n_elems/2; i++) {                               \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
  VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO
2403

    
2404
/* vmsummbm: multiply signed bytes of a by unsigned bytes of b, then per
 * word accumulate the four partial products onto c (modulo 2^32).  */
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t partial[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        partial[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i]
                  + partial[4 * i]
                  + partial[4 * i + 1]
                  + partial[4 * i + 2]
                  + partial[4 * i + 3];
    }
}
2417

    
2418
/* vmsumshm: per word, sum of the two signed halfword products plus c
 * (modulo 2^32, no saturation).  */
void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t partial[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        partial[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + partial[2 * i] + partial[2 * i + 1];
    }
}
2431

    
2432
/* vmsumshs: like vmsumshm but the per-word accumulation is done in 64
 * bits and saturated to int32; sets VSCR[SAT] on saturation.  */
void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t partial[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        partial[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        int64_t sum = (int64_t)c->s32[i] + partial[2 * i] + partial[2 * i + 1];
        r->u32[i] = cvtsdsw(sum, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2451

    
2452
/* vmsumubm: unsigned byte products, four per word, accumulated onto c
 * (modulo 2^32).  */
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t partial[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        partial[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i]
                  + partial[4 * i]
                  + partial[4 * i + 1]
                  + partial[4 * i + 2]
                  + partial[4 * i + 3];
    }
}
2465

    
2466
/* vmsumuhm: per word, sum of the two unsigned halfword products plus c
 * (modulo 2^32).  */
void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t partial[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        partial[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + partial[2 * i] + partial[2 * i + 1];
    }
}
2479

    
2480
/* vmsumuhs: like vmsumuhm but accumulated in 64 bits and saturated to
 * uint32; sets VSCR[SAT] on saturation.  */
void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t partial[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        partial[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        uint64_t sum = (uint64_t)c->u32[i] + partial[2 * i] + partial[2 * i + 1];
        r->u32[i] = cvtuduw(sum, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2499

    
2500
/* vmule*/vmulo*: multiply the even- or odd-numbered (architectural
 * order) elements into double-width products.  HI_IDX/LO_IDX pick the
 * even/odd source element within each product-sized pair on either host
 * byte order.  */
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element) \
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
2521

    
2522
/* vnmsubfp: per element, compute -((a * c) - b), i.e. the negated
 * multiply-subtract.  NaN operands are handled by HANDLE_NAN3. */
void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end, so the intermediate product is not
             * rounded to single precision first. */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_sub(t, bf, &env->vec_status);
            t = float64_chs(t);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
2540

    
2541
/* vperm: byte-wise permute.  Each control byte of c selects one source
 * byte: bits 0-3 are the byte index, bit 4 selects b instead of a.
 * The low-bit of the upper nibble (0x10) choosing b matches the a:b
 * concatenation order of the architected instruction. */
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;   /* build into a local so r may alias a, b or c */
    int i;
    VECTOR_FOR_INORDER_I (i, u8) {
        int s = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int index = s & 0xf;
#else
        /* byte order within the vector is reversed on LE hosts */
        int index = 15 - (s & 0xf);
#endif
        if (s & 0x10) {
            result.u8[i] = b->u8[index];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
    *r = result;
}
2560

    
2561
#if defined(HOST_WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
/* vpkpx: pack eight 32-bit pixels (from a then b) into eight 16-bit
 * 1/5/5/5 pixels, keeping the top 5 bits of each 8-bit channel. */
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;   /* local so r may alias a or b */
#if defined(HOST_WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I (i, u64) {
        VECTOR_FOR_INORDER_I (j, u32){
            uint32_t e = x[i]->u32[j];
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}
2586

    
2587
/* VPK generates the vector pack helpers: narrow the elements of a and b
 * into half-width elements of the result via the conversion macro `cvt`
 * (saturating cvt* helpers, or the identity macro I for modulo packs).
 * When dosat is set and any element saturated, VSCR[SAT] is raised. */
#define VPK(suffix, from, to, cvt, dosat)       \
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
        VECTOR_FOR_INORDER_I (i, from) {                                \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
/* Identity "conversion" used by the non-saturating (modulo) packs. */
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG
2616

    
2617
/* vrefp: per-element reciprocal "estimate", implemented here as the
 * exact division 1.0 / b[i]. */
void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
        }
    }
}
2626

    
2627
/* VRFI generates vrfi{n,m,p,z}: round each element to an integral value
 * using a fixed rounding mode.  A copy of vec_status is used so the
 * temporary rounding mode does not leak into later operations. */
#define VRFI(suffix, rounding)                                          \
    void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(rounding, &s);                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN1(r->f[i], b->f[i]) {                             \
                r->f[i] = float32_round_to_int (b->f[i], &s);           \
            }                                                           \
        }                                                               \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI
2644

    
2645
/* VROTATE generates vrl{b,h,w}: rotate each element left by the shift
 * count held in the low log2(width) bits of the matching element of b.
 * NOTE(review): when shift == 0 the right-shift count equals the full
 * element width; for the u32 case that is a 32-bit shift by 32, which
 * is undefined behavior in C — relies on the host behaving benignly.
 * Confirm/fix upstream. */
#define VROTATE(suffix, element)                                        \
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
        }                                                               \
    }
VROTATE(b, u8)
VROTATE(h, u16)
VROTATE(w, u32)
#undef VROTATE
2659

    
2660
/* vrsqrtefp: per-element reciprocal square-root "estimate", implemented
 * exactly as 1.0 / sqrt(b[i]). */
void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            float32 t = float32_sqrt(b->f[i], &env->vec_status);
            r->f[i] = float32_div(float32_one, t, &env->vec_status);
        }
    }
}
2670

    
2671
/* vsel: bit-wise select — each result bit is taken from b where the
 * corresponding mask bit in c is set, and from a where it is clear. */
void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int half;

    for (half = 0; half < 2; half++) {
        uint64_t mask = c->u64[half];
        r->u64[half] = (b->u64[half] & mask) | (a->u64[half] & ~mask);
    }
}
2676

    
2677
/* vexptefp: per-element 2**x "estimate", computed via float32_exp2. */
void helper_vexptefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_exp2(b->f[i], &env->vec_status);
        }
    }
}
2686

    
2687
/* vlogefp: per-element log2(x) "estimate", computed via float32_log2. */
void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_log2(b->f[i], &env->vec_status);
        }
    }
}
2696

    
2697
#if defined(HOST_WORDS_BIGENDIAN)
#define LEFT 0
#define RIGHT 1
#else
#define LEFT 1
#define RIGHT 0
#endif
/* The specification says that the results are undefined if all of the
 * shift counts are not identical.  We check to make sure that they are
 * to conform to what real hardware appears to do.  */
/* VSHIFT generates vsl/vsr: shift the whole 128-bit register by 0-7
 * bits, taking the count from bits 0-2 of the last byte of b; r is left
 * untouched when the per-byte counts disagree. */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
        int doit = 1;                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                *r = *a;                                                \
            } else if (leftp) {                                         \
                /* carry bits crossing the 64-bit half boundary */      \
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
            }                                                           \
        }                                                               \
    }
VSHIFT(l, LEFT)
VSHIFT(r, RIGHT)
#undef VSHIFT
#undef LEFT
#undef RIGHT
2735

    
2736
/* VSL generates vsl{b,h,w}: shift each element left by the count in the
 * low bits of the matching element of b (count is masked to the element
 * width minus one, so the shift is always in range). */
#define VSL(suffix, element)                                            \
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL
2750

    
2751
/* vsldoi: shift the 32-byte concatenation a:b left by `shift` bytes and
 * keep the high 16 bytes of the result. */
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;   /* local so r may alias a or b */

#if defined(HOST_WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index-0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    /* on LE hosts byte order within the vector is reversed */
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index-0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    *r = result;
}
2778

    
2779
/* vslo: shift the whole register left by 0-15 octets; the byte count
 * comes from bits 3-6 of the last byte of b.  Vacated bytes are zero. */
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#else
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#endif
}
2791

    
2792
/* Experimental testing shows that hardware masks the immediate.  */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(HOST_WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
/* reverse the element index on little-endian hosts */
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
#endif
/* VSPLT generates vsplt{b,h,w}: replicate the element of b selected by
 * the (masked) immediate into every element of r. */
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED
2814

    
2815
/* VSPLTI generates vspltis{b,h,w}: sign-extend the 5-bit immediate
 * (the shift-left-3/shift-right-3 pair extends bit 4) and replicate it
 * into every element. */
#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI
2828

    
2829
/* VSR generates vsr{b,h,w} and vsra{b,h,w}: shift each element right by
 * the count in the matching element of b (masked to the element width).
 * The arithmetic variants use signed element types, so >> is expected to
 * sign-extend (right-shift of negative values is implementation-defined
 * in C; QEMU relies on the usual arithmetic-shift behavior here). */
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR
2846

    
2847
/* vsro: shift the whole register right by 0-15 octets; the byte count
 * comes from bits 3-6 of the last byte of b.  Vacated bytes are zero. */
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#else
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#endif
}
2859

    
2860
/* vsubcuw: per-word carry-out of unsigned subtraction a - b:
 * 1 when a[i] >= b[i] (no borrow), 0 otherwise. */
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = a->u32[i] >= b->u32[i];
    }
}
2867

    
2868
/* vsumsws: sum all four signed words of a plus the last word of b,
 * saturate to 32 bits into the last word of r; the other words are
 * cleared.  Saturation raises VSCR[SAT]. */
void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;   /* local so r may alias a or b */
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2892

    
2893
/* vsum2sws: for each 64-bit half, sum its two signed words of a plus
 * the odd word of b, saturate to 32 bits into the odd word of that
 * half; the even word is cleared.  Saturation raises VSCR[SAT].
 * (The inner loop bound ARRAY_SIZE(r->u64) == 2 doubles as "two words
 * per doubleword".) */
void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;   /* local so r may alias a or b */
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper+i*2];
        result.u64[i] = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2*i+j];
        }
        result.s32[upper+i*2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2918

    
2919
/* vsum4sbs: for each word, sum its four signed bytes of a plus the
 * matching word of b, with 32-bit saturation.  Saturation raises
 * VSCR[SAT].  (ARRAY_SIZE(r->s32) == 4 doubles as "bytes per word".) */
void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4*i+j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2936

    
2937
/* vsum4shs: for each word, sum its two signed halfwords of a plus the
 * matching word of b, with 32-bit saturation (raises VSCR[SAT]). */
void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        t += a->s16[2*i] + a->s16[2*i+1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2952

    
2953
/* vsum4ubs: for each word, sum its four unsigned bytes of a plus the
 * matching word of b, with unsigned 32-bit saturation (raises
 * VSCR[SAT]). */
void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4*i+j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2970

    
2971
#if defined(HOST_WORDS_BIGENDIAN)
#define UPKHI 1
#define UPKLO 0
#else
#define UPKHI 0
#define UPKLO 1
#endif
/* VUPKPX generates vupkhpx/vupklpx: expand four 16-bit 1/5/5/5 pixels
 * (high or low half of b) into 32-bit 8/8/8/8 pixels; the 1-bit alpha
 * channel is replicated to 0xff or 0.
 * Note the macro-local `r` and `b` channel variables shadow the
 * parameters, which is why the result is built in `result` first. */
#define VUPKPX(suffix, hi)                                      \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
    {                                                           \
        int i;                                                  \
        ppc_avr_t result;                                       \
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
            uint16_t e = b->u16[hi ? i : i+4];                  \
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
            uint8_t r = (e >> 10) & 0x1f;                       \
            uint8_t g = (e >> 5) & 0x1f;                        \
            uint8_t b = e & 0x1f;                               \
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
        }                                                               \
        *r = result;                                                    \
    }
VUPKPX(lpx, UPKLO)
VUPKPX(hpx, UPKHI)
#undef VUPKPX
2996

    
2997
/* VUPK generates vupk{h,l}{sb,sh}: sign-extend the high (hi != 0) or
 * low half of the packed elements of b into double-width elements. */
#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO
3020

    
3021
#undef DO_HANDLE_NAN
3022
#undef HANDLE_NAN1
3023
#undef HANDLE_NAN2
3024
#undef HANDLE_NAN3
3025
#undef VECTOR_FOR_INORDER_I
3026
#undef HI_IDX
3027
#undef LO_IDX
3028

    
3029
/*****************************************************************************/
3030
/* SPE extension helpers */
3031
/* Use a table to make this quicker */
3032
/* Nibble bit-reversal lookup table: hbrev[n] is the 4-bit value n with
 * its bits in reverse order. */
static uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};
3036

    
3037
/* Reverse the bit order of an 8-bit value (bit 0 <-> bit 7, etc.). */
static inline uint8_t byte_reverse(uint8_t val)
{
    uint8_t v = val;

    v = (uint8_t)(((v & 0xF0) >> 4) | ((v & 0x0F) << 4)); /* swap nibbles */
    v = (uint8_t)(((v & 0xCC) >> 2) | ((v & 0x33) << 2)); /* swap bit pairs */
    v = (uint8_t)(((v & 0xAA) >> 1) | ((v & 0x55) << 1)); /* swap bits */
    return v;
}
3041

    
3042
/* Reverse the bit order of a 32-bit word by bit-reversing each byte and
 * swapping byte positions.
 * Each reversed byte is cast to uint32_t before shifting: byte_reverse()
 * returns uint8_t, which promotes to (signed) int, and left-shifting a
 * value >= 0x80 by 24 would overflow int — undefined behavior in C. */
static inline uint32_t word_reverse(uint32_t val)
{
    return (uint32_t)byte_reverse(val >> 24) |
        ((uint32_t)byte_reverse(val >> 16) << 8) |
        ((uint32_t)byte_reverse(val >> 8) << 16) |
        ((uint32_t)byte_reverse(val) << 24);
}
3047

    
3048
#define MASKBITS 16 // Random value - to be fixed (implementation dependent)
/* brinc: bit-reversed increment of the low MASKBITS bits of arg1,
 * modulo the mask arg2, as used for FFT-style addressing: the low bits
 * are bit-reversed, incremented, reversed back, and masked by b. */
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}
3059

    
3060
/* Count leading sign bits of a 32-bit value: leading ones when the sign
 * bit is set (counted via clz of the complement), leading zeros
 * otherwise. */
uint32_t helper_cntlsw32 (uint32_t val)
{
    if (val & 0x80000000)
        return clz32(~val);
    else
        return clz32(val);
}
3067

    
3068
/* Count leading zeros of a 32-bit value. */
uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}
3072

    
3073
/* Single-precision floating-point conversions */
3074
/* efscfsi: convert a signed 32-bit integer to single precision. */
static inline uint32_t efscfsi(uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efscfui: convert an unsigned 32-bit integer to single precision. */
static inline uint32_t efscfui(uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efsctsi: single precision -> signed 32-bit integer, current rounding
 * mode; NaN inputs yield 0 instead of the IEEE 754 result. */
static inline int32_t efsctsi(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_int32(u.f, &env->vec_status);
}

/* efsctui: single precision -> unsigned 32-bit integer; NaN -> 0. */
static inline uint32_t efsctui(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_uint32(u.f, &env->vec_status);
}

/* efsctsiz: single precision -> signed integer, round toward zero;
 * NaN -> 0. */
static inline uint32_t efsctsiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

/* efsctuiz: single precision -> unsigned integer, round toward zero;
 * NaN -> 0. */
static inline uint32_t efsctuiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}
3139

    
3140
/* efscfsf: signed 32-bit fractional -> single precision (divide the
 * integer interpretation by 2^32). */
static inline uint32_t efscfsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efscfuf: unsigned 32-bit fractional -> single precision. */
static inline uint32_t efscfuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efsctsf: single precision -> signed fractional (scale by 2^32 then
 * convert to integer); NaN -> 0. */
static inline uint32_t efsctsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

/* efsctuf: single precision -> unsigned fractional; NaN -> 0. */
static inline uint32_t efsctuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
3193

    
3194
/* Wrap each single-precision SPE conversion primitive as a TCG helper
 * operating on the low 32 bits. */
#define HELPER_SPE_SINGLE_CONV(name)                                          \
uint32_t helper_e##name (uint32_t val)                                        \
{                                                                             \
    return e##name(val);                                                      \
}
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
3219

    
3220
/* Wrap each conversion primitive as a two-element vector helper:
 * apply it independently to the high and low 32-bit halves. */
#define HELPER_SPE_VECTOR_CONV(name)                                          \
uint64_t helper_ev##name (uint64_t val)                                       \
{                                                                             \
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
            (uint64_t)e##name(val);                                           \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
3246

    
3247
/* Single-precision floating-point arithmetic */
3248
/* efsadd: single-precision add on raw 32-bit representations. */
static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efssub: single-precision subtract. */
static inline uint32_t efssub(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efsmul: single-precision multiply. */
static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efsdiv: single-precision divide. */
static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}
3283

    
3284
/* Wrap each single-precision arithmetic primitive as a TCG helper. */
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);
3297

    
3298
/* Wrap each arithmetic primitive as a two-element vector helper:
 * operate on the high and low 32-bit halves independently. */
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
            (uint64_t)e##name(op1, op2);                                      \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
3312

    
3313
/* Single-precision floating-point comparisons */
3314
/* efststlt: 4 when op1 < op2, else 0 (crf-bit encoding). */
static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

/* efststgt: 4 when NOT (op1 <= op2).
 * NOTE(review): float32_le is false for unordered operands, so a NaN
 * input yields "greater than" here — confirm against the SPE spec. */
static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

/* efststeq: 4 when op1 == op2, else 0. */
static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

/* The efscmp* variants are currently aliases of the efstst* tests. */
static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststlt(op1, op2);
}

static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststgt(op1, op2);
}

static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststeq(op1, op2);
}
3355

    
3356
/* Wrap each single-precision comparison primitive as a TCG helper.
 * NOTE(review): the primitives already return 4 or 0; shifting by 2
 * yields 16 or 0, which does not fit in a 4-bit CR field.  Looks
 * suspicious — confirm against how translate.c consumes this value. */
#define HELPER_SINGLE_SPE_CMP(name)                                           \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2) << 2;                                            \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
3373

    
3374
/* Merge the comparison results of the two vector halves into one CR
 * field: bit 3 = high-half result, bit 2 = low-half result,
 * bit 1 = either set, bit 0 = both set. */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    uint32_t crf = t0 & t1;            /* bit 0: both */
    crf |= (uint32_t)(t0 | t1) << 1;   /* bit 1: either */
    crf |= (uint32_t)t1 << 2;          /* bit 2: low half */
    crf |= (uint32_t)t0 << 3;          /* bit 3: high half */
    return crf;
}
3378

    
3379
/* Expand a vector (two-element) single-precision SPE comparison helper:
 * applies e<name>() to the high and low 32-bit halves of each 64-bit
 * operand and merges the two results with evcmp_merge(). */
#define HELPER_VECTOR_SPE_CMP(name)                                           \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
3396

    
3397
/* Double-precision floating-point conversion */
3398
/* efdcfsi: convert a signed 32-bit integer to double precision. */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU res;

    res.d = int32_to_float64(val, &env->vec_status);
    return res.ll;
}
3406

    
3407
/* efdcfsid: convert a signed 64-bit integer to double precision. */
uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU res;

    res.d = int64_to_float64(val, &env->vec_status);
    return res.ll;
}
3415

    
3416
/* efdcfui: convert an unsigned 32-bit integer to double precision. */
uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU res;

    res.d = uint32_to_float64(val, &env->vec_status);
    return res.ll;
}
3424

    
3425
/* efdcfuid: convert an unsigned 64-bit integer to double precision. */
uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU res;

    res.d = uint64_to_float64(val, &env->vec_status);
    return res.ll;
}
3433

    
3434
/* efdctsi: convert double precision to a signed 32-bit integer. */
uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* SPE deviates from IEEE 754 here: a NaN input converts to 0 */
    if (unlikely(float64_is_quiet_nan(in.d))) {
        return 0;
    }
    return float64_to_int32(in.d, &env->vec_status);
}
3445

    
3446
/* efdctui: convert double precision to an unsigned 32-bit integer. */
uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* SPE deviates from IEEE 754 here: a NaN input converts to 0 */
    if (unlikely(float64_is_quiet_nan(in.d))) {
        return 0;
    }
    return float64_to_uint32(in.d, &env->vec_status);
}
3457

    
3458
/* efdctsiz: convert double precision to a signed 32-bit integer,
 * rounding toward zero. */
uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* SPE deviates from IEEE 754 here: a NaN input converts to 0 */
    if (unlikely(float64_is_quiet_nan(in.d))) {
        return 0;
    }
    return float64_to_int32_round_to_zero(in.d, &env->vec_status);
}
3469

    
3470
/* efdctsidz: convert double precision to a signed 64-bit integer,
 * rounding toward zero. */
uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* SPE deviates from IEEE 754 here: a NaN input converts to 0 */
    if (unlikely(float64_is_quiet_nan(in.d))) {
        return 0;
    }
    return float64_to_int64_round_to_zero(in.d, &env->vec_status);
}
3481

    
3482
/* efdctuiz: convert double precision to an unsigned 32-bit integer,
 * rounding toward zero. */
uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* SPE deviates from IEEE 754 here: a NaN input converts to 0 */
    if (unlikely(float64_is_quiet_nan(in.d))) {
        return 0;
    }
    return float64_to_uint32_round_to_zero(in.d, &env->vec_status);
}
3493

    
3494
/* efdctuidz: convert double precision to an unsigned 64-bit integer,
 * rounding toward zero. */
uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* SPE deviates from IEEE 754 here: a NaN input converts to 0 */
    if (unlikely(float64_is_quiet_nan(in.d))) {
        return 0;
    }
    return float64_to_uint64_round_to_zero(in.d, &env->vec_status);
}
3505

    
3506
/* efdcfsf: convert a signed 32-bit fractional value to double
 * precision (integer value scaled down by 2^32). */
uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU res;
    float64 scale;

    res.d = int32_to_float64(val, &env->vec_status);
    scale = int64_to_float64(1ULL << 32, &env->vec_status);
    res.d = float64_div(res.d, scale, &env->vec_status);
    return res.ll;
}
3517

    
3518
/* efdcfuf: convert an unsigned 32-bit fractional value to double
 * precision (integer value scaled down by 2^32). */
uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU res;
    float64 scale;

    res.d = uint32_to_float64(val, &env->vec_status);
    scale = int64_to_float64(1ULL << 32, &env->vec_status);
    res.d = float64_div(res.d, scale, &env->vec_status);
    return res.ll;
}
3529

    
3530
/* efdctsf: convert double precision to a signed 32-bit fractional
 * value (scaled up by 2^32 before the integer conversion). */
uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU in;
    float64 scale;

    in.ll = val;
    /* SPE deviates from IEEE 754 here: a NaN input converts to 0 */
    if (unlikely(float64_is_quiet_nan(in.d))) {
        return 0;
    }
    scale = uint64_to_float64(1ULL << 32, &env->vec_status);
    in.d = float64_mul(in.d, scale, &env->vec_status);
    return float64_to_int32(in.d, &env->vec_status);
}
3544

    
3545
/* efdctuf: convert double precision to an unsigned 32-bit fractional
 * value (scaled up by 2^32 before the integer conversion). */
uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU in;
    float64 scale;

    in.ll = val;
    /* SPE deviates from IEEE 754 here: a NaN input converts to 0 */
    if (unlikely(float64_is_quiet_nan(in.d))) {
        return 0;
    }
    scale = uint64_to_float64(1ULL << 32, &env->vec_status);
    in.d = float64_mul(in.d, scale, &env->vec_status);
    return float64_to_uint32(in.d, &env->vec_status);
}
3559

    
3560
/* efscfd: narrow a double-precision value to single precision. */
uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU in;
    CPU_FloatU out;

    in.ll = val;
    out.f = float64_to_float32(in.d, &env->vec_status);
    return out.l;
}
3570

    
3571
/* efdcfs: widen a single-precision value to double precision. */
uint64_t helper_efdcfs (uint32_t val)
{
    CPU_FloatU in;
    CPU_DoubleU out;

    in.l = val;
    out.d = float32_to_float64(in.f, &env->vec_status);
    return out.ll;
}
3581

    
3582
/* Double precision fixed-point arithmetic */
3583
/* efdadd: double-precision addition. */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_add(a.d, b.d, &env->vec_status);
    return a.ll;
}
3591

    
3592
/* efdsub: double-precision subtraction (op1 - op2). */
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_sub(a.d, b.d, &env->vec_status);
    return a.ll;
}
3600

    
3601
/* efdmul: double-precision multiplication. */
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_mul(a.d, b.d, &env->vec_status);
    return a.ll;
}
3609

    
3610
/* efddiv: double-precision division (op1 / op2). */
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_div(a.d, b.d, &env->vec_status);
    return a.ll;
}
3618

    
3619
/* Double precision floating point helpers */
3620
/* efdtstlt: double-precision "test less than" — 4 if op1 < op2, else 0. */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    if (float64_lt(a.d, b.d, &env->vec_status)) {
        return 4;
    }
    return 0;
}
3627

    
3628
/* efdtstgt: double-precision "test greater than" — 4 if op1 > op2,
 * else 0.  Implemented as !(op1 <= op2), so unordered (NaN) compares
 * also yield 4. */
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    if (float64_le(a.d, b.d, &env->vec_status)) {
        return 0;
    }
    return 4;
}
3635

    
3636
/* efdtsteq: double-precision "test equal" — 4 if op1 == op2, else 0. */
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    if (float64_eq(a.d, b.d, &env->vec_status)) {
        return 4;
    }
    return 0;
}
3643

    
3644
/* efdcmplt: double-precision "compare less than".  Currently identical
 * to the test-only variant; special values not yet handled separately. */
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(op1, op2);
}
3649

    
3650
/* efdcmpgt: double-precision "compare greater than".  Currently identical
 * to the test-only variant; special values not yet handled separately. */
uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(op1, op2);
}
3655

    
3656
/* efdcmpeq: double-precision "compare equal".  Currently identical to
 * the test-only variant; special values not yet handled separately. */
uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(op1, op2);
}
3661

    
3662
/*****************************************************************************/
3663
/* Softmmu support */
3664
#if !defined (CONFIG_USER_ONLY)
3665

    
3666
#define MMUSUFFIX _mmu
3667

    
3668
#define SHIFT 0
3669
#include "softmmu_template.h"
3670

    
3671
#define SHIFT 1
3672
#include "softmmu_template.h"
3673

    
3674
#define SHIFT 2
3675
#include "softmmu_template.h"
3676

    
3677
#define SHIFT 3
3678
#include "softmmu_template.h"
3679

    
3680
/* try to fill the TLB and return an exception if error. If retaddr is
3681
   NULL, it means that the function was called in C code (i.e. not
3682
   from generated code or from helper.c) */
3683
/* XXX: fix it to restore all registers */
3684
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    /* Ask the PPC MMU to resolve the access; non-zero means a fault. */
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Deliver the exception prepared by the MMU fault handler.
         * NOTE(review): this longjmps back to the CPU loop via
         * cpu_loop_exit(), so the env = saved_env below is only reached
         * on the non-faulting path. */
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
3711

    
3712
/* Segment registers load and store */
3713
/* Read segment register sr_num; 64-bit MMU models delegate to
 * ppc_load_sr() instead of reading the raw env->sr[] array. */
target_ulong helper_load_sr (target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64)
        return ppc_load_sr(env, sr_num);
#endif
    return env->sr[sr_num];
}
3721

    
3722
/* Write segment register sr_num (all side effects handled by
 * ppc_store_sr()). */
void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}
3726

    
3727
/* SLB management */
3728
#if defined(TARGET_PPC64)
3729
/* Read SLB entry slb_nr (slbmfee/slbmfev support). */
target_ulong helper_load_slb (target_ulong slb_nr)
{
    return ppc_load_slb(env, slb_nr);
}

/* Write an SLB entry from the RB/RS register pair (slbmte). */
void helper_store_slb (target_ulong rb, target_ulong rs)
{
    ppc_store_slb(env, rb, rs);
}

/* slbia: invalidate all SLB entries. */
void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}

/* slbie: invalidate the SLB entry matching the effective address. */
void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
3748

    
3749
#endif /* defined(TARGET_PPC64) */
3750

    
3751
/* TLB management */
3752
/* tlbia: invalidate the whole TLB. */
void helper_tlbia (void)
{
    ppc_tlb_invalidate_all(env);
}
3756

    
3757
/* tlbie: invalidate the TLB entry for one effective address. */
void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
3761

    
3762
/* Software driven TLBs management */
3763
/* PowerPC 602/603 software TLB load instructions helpers */
3764
/* Load one entry of the 602/603 software-managed TLB.  The miss handler
 * has already placed the PTE words in the RPA and ICMP/IMISS (code) or
 * DCMP/DMISS (data) SPRs; this copies them into the QEMU TLB model. */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    /* Bit 17 of SRR1 selects which TLB way to replace */
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3786

    
3787
/* tlbld: load a data TLB entry (602/603). */
void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}

/* tlbli: load an instruction TLB entry (602/603). */
void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}
3796

    
3797
/* PowerPC 74xx software TLB load instructions helpers */
3798
/* Load one entry of the 74xx software-managed TLB.  PTE words come from
 * the PTELO/PTEHI SPRs; TLBMISS carries the faulting address with the
 * replacement way encoded in its low two bits. */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3815

    
3816
/* tlbld: load a data TLB entry (74xx). */
void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}

/* tlbli: load an instruction TLB entry (74xx). */
void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}
3825

    
3826
/* Decode a BookE TLB size field: page size = 1 KiB * 4^size.
 * NOTE(review): the shift is done in int width before widening to
 * target_ulong — verify size codes >= 0xB on 64-bit targets. */
static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}
3830

    
3831
/* Encode a page size as a BookE TLB size field: the inverse of
 * booke_tlb_to_page_size(), i.e. the n for which page_size equals
 * 1 KiB * 4^n.  Returns -1 when page_size is not an exact match
 * (not a supported power of four times 1 KiB). */
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
#if defined (TARGET_PPC64)
    const int max_code = 0xF;   /* up to 1 TiB on 64-bit targets */
#else
    const int max_code = 0xA;   /* up to 1 GiB fits in 32 bits */
#endif
    int code;

    for (code = 0; code <= max_code; code++) {
        if (page_size == (target_ulong)(1024ULL << (2 * code))) {
            return code;
        }
    }

    return -1;
}
3893

    
3894
/* Helpers for 4xx TLB management */
3895
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */
3896

    
3897
#define PPC4XX_TLBHI_V              0x00000040
3898
#define PPC4XX_TLBHI_E              0x00000020
3899
#define PPC4XX_TLBHI_SIZE_MIN       0
3900
#define PPC4XX_TLBHI_SIZE_MAX       7
3901
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
3902
#define PPC4XX_TLBHI_SIZE_SHIFT     7
3903
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007
3904

    
3905
#define PPC4XX_TLBLO_EX             0x00000200
3906
#define PPC4XX_TLBLO_WR             0x00000100
3907
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
3908
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00
3909

    
3910
/* tlbre (TLBHI word): read back EPN, valid bit and size code of a 4xx
 * TLB entry; also mirrors the entry's PID into SPR 40x_PID. */
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    /* Clamp unrepresentable sizes to the default code */
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}
3930

    
3931
/* tlbre (TLBLO word): read back RPN plus the EX/WR permission bits of
 * a 4xx TLB entry. */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    const ppcemb_tlb_t *tlb = &env->tlb[entry & PPC4XX_TLB_ENTRY_MASK].tlbe;
    target_ulong word = tlb->RPN;

    if (tlb->prot & PAGE_EXEC) {
        word |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        word |= PPC4XX_TLBLO_WR;
    }
    return word;
}
3947

    
3948
/* tlbwe (TLBHI word): update EPN, size and valid bit of a 4xx TLB entry.
 * Flushes the QEMU TLB pages covered by both the old mapping (if it was
 * valid) and the new one, so stale translations cannot survive. */
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    /* Align the EPN down to the entry's page size */
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
}
4006

    
4007
/* tlbwe (TLBLO word): update RPN, attributes and permissions of a 4xx
 * TLB entry.  PAGE_READ is always granted; EX/WR come from the value. */
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
4032

    
4033
/* tlbsx: search the 4xx TLB for the entry matching address under the
 * current 40x PID. */
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
4037

    
4038
/* PowerPC 440 TLB management */
4039
/* 440 tlbwe: write one of the three words of a TLB entry.
 * word 0 = EPN/size/valid, word 1 = RPN, word 2 = attributes and the
 * user/supervisor permission bits.  Changes that can invalidate cached
 * translations trigger a full QEMU TLB flush. */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        /* Word 0: EPN, size code, one attribute bit and the valid bit */
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        /* Word 1: real page number */
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        /* Word 2: storage attributes plus user (<<4) and supervisor
         * permission bits; the valid bit is preserved */
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}
4100

    
4101
/* 440 tlbre: read back one of the three words of a TLB entry, inverse
 * of helper_440_tlbwe.  Word 0 also mirrors the entry's PID into the
 * low byte of MMUCR. */
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        /* Unrepresentable sizes read back as code 1 */
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        /* User permissions live in bits prot<<4, supervisor in the
         * low prot bits (see helper_440_tlbwe word 2) */
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}
4146

    
4147
/* 440 tlbsx: search the TLB for the entry matching address under the
 * PID currently held in the low byte of MMUCR. */
target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
4151

    
4152
#endif /* !CONFIG_USER_ONLY */