Statistics
| Branch: | Revision:

root / target-ppc / op_helper.c @ 618ba8e6

History | View | Annotate | Download (126.1 kB)

1
/*
2
 *  PowerPC emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2003-2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <string.h>
20
#include "exec.h"
21
#include "host-utils.h"
22
#include "helper.h"
23

    
24
#include "helper_regs.h"
25

    
26
//#define DEBUG_OP
27
//#define DEBUG_EXCEPTIONS
28
//#define DEBUG_SOFTWARE_TLB
29

    
30
#ifdef DEBUG_SOFTWARE_TLB
31
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
32
#else
33
#  define LOG_SWTLB(...) do { } while (0)
34
#endif
35

    
36

    
37
/*****************************************************************************/
38
/* Exceptions processing helpers */
39

    
40
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
41
{
42
#if 0
43
    printf("Raise exception %3x code : %d\n", exception, error_code);
44
#endif
45
    env->exception_index = exception;
46
    env->error_code = error_code;
47
    cpu_loop_exit();
48
}
49

    
50
void helper_raise_exception (uint32_t exception)
51
{
52
    helper_raise_exception_err(exception, 0);
53
}
54

    
55
/*****************************************************************************/
56
/* SPR accesses */
57
/* Debug helper: log an SPR read (number in decimal and hex, plus value). */
void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

/* Debug helper: log an SPR write (number in decimal and hex, plus value). */
void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}
68

    
69
/* Read the low 32 bits of the time base (TBL). */
target_ulong helper_load_tbl (void)
{
    return (target_ulong)cpu_ppc_load_tbl(env);
}

/* Read the high 32 bits of the time base (TBU). */
target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

/* Read the low 32 bits of the alternate time base (ATBL). */
target_ulong helper_load_atbl (void)
{
    return (target_ulong)cpu_ppc_load_atbl(env);
}

/* Read the high 32 bits of the alternate time base (ATBU). */
target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* Read the Processor Utilization of Resources Register (PURR). */
target_ulong helper_load_purr (void)
{
    return (target_ulong)cpu_ppc_load_purr(env);
}
#endif

/* Read the PowerPC 601 RTC lower register. */
target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

/* Read the PowerPC 601 RTC upper register. */
target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
105

    
106
#if !defined(CONFIG_USER_ONLY)
107
#if defined (TARGET_PPC64)
108
void helper_store_asr (target_ulong val)
109
{
110
    ppc_store_asr(env, val);
111
}
112
#endif
113

    
114
/* Write SDR1 (hashed page table base/size). */
void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

/* Write the low 32 bits of the time base. */
void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

/* Write the high 32 bits of the time base. */
void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

/* Write the low 32 bits of the alternate time base. */
void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

/* Write the high 32 bits of the alternate time base. */
void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

/* Write the PowerPC 601 RTC lower register. */
void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

/* Write the PowerPC 601 RTC upper register. */
void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

/* Read the decrementer (DEC). */
target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

/* Write the decrementer (DEC). */
void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}
158

    
159
/* Write HID0 on the PowerPC 601.  Bit 3 of the value selects the
 * current endianness; when that bit changes, the cached endianness
 * state in hflags/hflags_nmsr is updated accordingly.
 */
void helper_store_hid0_601 (target_ulong val)
{
    target_ulong old_hid0 = env->spr[SPR_HID0];

    if ((val ^ old_hid0) & 0x00000008) {
        /* The endianness selection bit flipped: rebuild the MSR_LE
         * position in both the non-MSR flags and the active hflags. */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
                 val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}
175

    
176
/* Write a PowerPC 403 protection bound register.  The TLB is flushed on
 * an actual change because protection attributes may have changed. */
void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}
184

    
185
/* Read the PowerPC 40x programmable interval timer (PIT). */
target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

/* Write the PowerPC 40x programmable interval timer (PIT). */
void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

/* Write the PowerPC 40x debug control register 0 (DBCR0). */
void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

/* Write the PowerPC 40x storage little-endian register (SLER). */
void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

/* Write the BookE timer control register (TCR). */
void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

/* Write the BookE timer status register (TSR). */
void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}
214

    
215
/* Write instruction BAT upper register nr. */
void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

/* Write instruction BAT lower register nr. */
void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

/* Write data BAT upper register nr. */
void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

/* Write data BAT lower register nr. */
void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

/* Write 601-style (unified) BAT lower register nr. */
void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

/* Write 601-style (unified) BAT upper register nr. */
void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
244
#endif
245

    
246
/*****************************************************************************/
247
/* Memory load and stores */
248

    
249
/* Add a displacement to an effective address, honouring the current
 * addressing mode: in 32-bit mode on a 64-bit CPU (MSR[SF] clear) the
 * result wraps to 32 bits.
 */
static inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf) {
        return (uint32_t)(addr + arg);
    }
#endif
    return addr + arg;
}
258

    
259
/* lmw: load consecutive words from addr into GPRs reg..31, byte-swapping
 * each word when the CPU is running little-endian. */
void helper_lmw (target_ulong addr, uint32_t reg)
{
    while (reg < 32) {
        if (msr_le) {
            env->gpr[reg] = bswap32(ldl(addr));
        } else {
            env->gpr[reg] = ldl(addr);
        }
        addr = addr_add(addr, 4);
        reg++;
    }
}
269

    
270
/* stmw: store GPRs reg..31 as consecutive words at addr, byte-swapping
 * each word when the CPU is running little-endian. */
void helper_stmw (target_ulong addr, uint32_t reg)
{
    while (reg < 32) {
        if (msr_le) {
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        } else {
            stl(addr, (uint32_t)env->gpr[reg]);
        }
        addr = addr_add(addr, 4);
        reg++;
    }
}
280

    
281
/* lsw: load nb bytes starting at addr into successive registers beginning
 * with reg (wrapping r31 -> r0).  A final partial word is left-justified
 * in its register with the unused low bytes zeroed. */
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    /* Whole words first */
    while (nb > 3) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    /* Remaining 1-3 bytes go left-justified into one register */
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        sh = 24;
        while (nb > 0) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
            sh -= 8;
            nb--;
        }
    }
}
297
/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    /* lswx with a zero byte count (XER[BC]) is a no-op */
    if (likely(xer_bc != 0)) {
        /* Invalid if the destination register range overlaps ra
         * (unless ra is r0) or rb */
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
315

    
316
/* stsw: store nb bytes from successive registers beginning with reg
 * (wrapping r31 -> r0) to addr.  A final partial word is taken from the
 * high-order bytes of the last register. */
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    /* Whole words first */
    while (nb > 3) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    /* Remaining 1-3 bytes come from the top of the register */
    if (unlikely(nb > 0)) {
        sh = 24;
        while (nb > 0) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
            sh -= 8;
            nb--;
        }
    }
}
331

    
332
/* Zero one data cache line of the given size at addr, and cancel any
 * lwarx/ldarx reservation that covered it. */
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    /* Align down to the cache line boundary (size is a power of two) */
    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        stl(addr + i, 0);
    }
    /* Zeroing the line kills a pending reservation on it */
    if (env->reserve_addr == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }
}

/* dcbz using the CPU's configured data cache line size. */
void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

/* dcbz on the 970: HID5 can force a 32-byte zeroing instead of the full
 * cache line. */
void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        do_dcbz(addr, 32);
    } else {
        do_dcbz(addr, env->dcache_line_size);
    }
}
355

    
356
/* icbi: invalidate one instruction cache line, dropping any translated
 * code derived from it. */
void helper_icbi(target_ulong addr)
{
    /* NOTE(review): the alignment mask uses dcache_line_size while the
     * invalidation span below uses icache_line_size; if the two sizes
     * ever differ this looks inconsistent -- confirm intent. */
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}
367

    
368
// XXX: to be tested
/* lscbx: load string and compare byte indexed.  Loads up to XER[BC] bytes
 * into successive registers (wrapping r31 -> r0), packing four bytes per
 * register from the most-significant byte down, and stops after loading a
 * byte equal to XER[CMP].  ra (if non-zero) and rb are never overwritten.
 * Returns the loop index at termination (presumably folded into XER[BC]
 * by the caller -- confirm against the translator). */
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;
    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            /* Register filled: move to the next one, byte position 0 */
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
392

    
393
/*****************************************************************************/
394
/* Fixed point operations helpers */
395
#if defined(TARGET_PPC64)
396

    
397
/* multiply high word */
398
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
399
{
400
    uint64_t tl, th;
401

    
402
    muls64(&tl, &th, arg1, arg2);
403
    return th;
404
}
405

    
406
/* multiply high word unsigned */
407
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
408
{
409
    uint64_t tl, th;
410

    
411
    mulu64(&tl, &th, arg1, arg2);
412
    return th;
413
}
414

    
415
/* mulldo: low 64 bits of the signed product; sets XER[OV] (and SO) when
 * the full 128-bit product does not fit in a signed 64-bit integer.
 *
 * Fixes: the high half is computed into a plain uint64_t instead of
 * casting &int64_t to uint64_t* (a strict-aliasing violation), and
 * overflow is detected exactly: the product fits iff the high half is
 * the sign extension of the low half's top bit.  The old test (high half
 * in {0, -1}) missed overflows such as a product of exactly 2^63.
 */
uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    if (likely((int64_t)th == ((int64_t)tl >> 63))) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return tl;
}
429
#endif
430

    
431
/* cntlzw: count leading zeros in the low 32 bits of t (via clz32). */
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}
435

    
436
#if defined(TARGET_PPC64)
437
target_ulong helper_cntlzd (target_ulong t)
438
{
439
    return clz64(t);
440
}
441
#endif
442

    
443
/* shift right arithmetic helper */
444
target_ulong helper_sraw (target_ulong value, target_ulong shift)
445
{
446
    int32_t ret;
447

    
448
    if (likely(!(shift & 0x20))) {
449
        if (likely((uint32_t)shift != 0)) {
450
            shift &= 0x1f;
451
            ret = (int32_t)value >> shift;
452
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
453
                env->xer &= ~(1 << XER_CA);
454
            } else {
455
                env->xer |= (1 << XER_CA);
456
            }
457
        } else {
458
            ret = (int32_t)value;
459
            env->xer &= ~(1 << XER_CA);
460
        }
461
    } else {
462
        ret = (int32_t)value >> 31;
463
        if (ret) {
464
            env->xer |= (1 << XER_CA);
465
        } else {
466
            env->xer &= ~(1 << XER_CA);
467
        }
468
    }
469
    return (target_long)ret;
470
}
471

    
472
#if defined(TARGET_PPC64)
473
/* srad: 64-bit arithmetic shift right.  XER[CA] is set when the source
 * is negative and one bits were shifted out.
 *
 * Fix: the shifted-out-bits mask must be built with 1ULL -- "1 << shift"
 * shifts a 32-bit int, which is undefined behavior and a wrong mask for
 * shift amounts >= 31, so the carry was miscomputed for large shifts.
 */
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            /* Zero shift: result unchanged, carry cleared */
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        /* Shift amount >= 64: result is all sign bits, carry = sign */
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
500
#endif
501

    
502
#if defined(TARGET_PPC64)
503
target_ulong helper_popcntb (target_ulong val)
504
{
505
    val = (val & 0x5555555555555555ULL) + ((val >>  1) &
506
                                           0x5555555555555555ULL);
507
    val = (val & 0x3333333333333333ULL) + ((val >>  2) &
508
                                           0x3333333333333333ULL);
509
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) &
510
                                           0x0f0f0f0f0f0f0f0fULL);
511
    return val;
512
}
513

    
514
target_ulong helper_popcntw (target_ulong val)
515
{
516
    val = (val & 0x5555555555555555ULL) + ((val >>  1) &
517
                                           0x5555555555555555ULL);
518
    val = (val & 0x3333333333333333ULL) + ((val >>  2) &
519
                                           0x3333333333333333ULL);
520
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) &
521
                                           0x0f0f0f0f0f0f0f0fULL);
522
    val = (val & 0x00ff00ff00ff00ffULL) + ((val >>  8) &
523
                                           0x00ff00ff00ff00ffULL);
524
    val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
525
                                           0x0000ffff0000ffffULL);
526
    return val;
527
}
528

    
529
target_ulong helper_popcntd (target_ulong val)
530
{
531
    return ctpop64(val);
532
}
533
#else
534
target_ulong helper_popcntb (target_ulong val)
535
{
536
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
537
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
538
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
539
    return val;
540
}
541

    
542
target_ulong helper_popcntw (target_ulong val)
543
{
544
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
545
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
546
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
547
    val = (val & 0x00ff00ff) + ((val >>  8) & 0x00ff00ff);
548
    val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
549
    return val;
550
}
551
#endif
552

    
553
/*****************************************************************************/
554
/* Floating point operations helpers */
555
/* Widen a single-precision value (raw bits) to double precision,
 * using the CPU's softfloat status for exception accumulation. */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

/* Narrow a double-precision value (raw bits) to single precision,
 * using the CPU's softfloat status for rounding and exceptions. */
uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}
572

    
573
/* Return non-zero when d has an all-zero biased exponent field, i.e. it
 * is denormalized (callers exclude zero beforehand). */
static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    /* Exponent occupies bits 62..52 of the IEEE754 double encoding */
    return (u.ll & 0x7FF0000000000000ULL) == 0;
}
581

    
582
/* Compute the PowerPC FPRF classification code (5-bit class-and-sign
 * field) for the double-precision value arg.  When set_fprf is non-zero
 * the code is stored into FPSCR[FPRF].  Only the low four bits (FPCC,
 * used for Rc=1 condition-register updates) are returned.
 */
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_any_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
633

    
634
/* Floating-point invalid operations exception */
/* Record an invalid-operation exception of kind op in FPSCR and, when
 * enabled by FPSCR[VE] and MSR[FE0|FE1], raise a program exception.
 * For the arithmetic classes with VE clear, the default quiet-NaN result
 * that should be written to the target FPR is returned; otherwise 0.
 */
static inline uint64_t fload_invalid_op_excp(int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
715

    
716
/* Record a zero-divide exception: set FPSCR[ZX]/[FX], clear FR/FI, and
 * raise a program exception when enabled by FPSCR[ZE] and MSR[FE0|FE1]. */
static inline void float_zero_divide_excp(void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}
731

    
732
/* Record an overflow exception: set FPSCR[OX]/[FX].  When enabled
 * (FPSCR[OE]) the exception is deferred by stashing it in
 * exception_index/error_code (delivered by helper_float_check_status);
 * otherwise XX and FI are set as the architected side effect. */
static inline void float_overflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}
749

    
750
/* Record an underflow exception: set FPSCR[UX]/[FX].  When enabled
 * (FPSCR[UE]) the exception is deferred for delivery after the target
 * FPR update. */
static inline void float_underflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}
764

    
765
/* Record an inexact-result exception: set FPSCR[XX]/[FX].  When enabled
 * (FPSCR[XE]) the exception is deferred for delivery after the target
 * FPR update. */
static inline void float_inexact_excp(void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
778

    
779
static inline void fpscr_set_rounding_mode(void)
780
{
781
    int rnd_type;
782

    
783
    /* Set rounding mode */
784
    switch (fpscr_rn) {
785
    case 0:
786
        /* Best approximation (round to nearest) */
787
        rnd_type = float_round_nearest_even;
788
        break;
789
    case 1:
790
        /* Smaller magnitude (round toward zero) */
791
        rnd_type = float_round_to_zero;
792
        break;
793
    case 2:
794
        /* Round toward +infinite */
795
        rnd_type = float_round_up;
796
        break;
797
    default:
798
    case 3:
799
        /* Round toward -infinite */
800
        rnd_type = float_round_down;
801
        break;
802
    }
803
    set_float_rounding_mode(rnd_type, &env->fp_status);
804
}
805

    
806
/* Clear one FPSCR bit.  When a rounding-control bit (RN/RN1) actually
 * changes from 1 to 0, refresh the softfloat rounding mode. */
void helper_fpscr_clrbit (uint32_t bit)
{
    int was_set = (env->fpscr >> bit) & 1;

    env->fpscr &= ~(1 << bit);
    if (was_set && (bit == FPSCR_RN1 || bit == FPSCR_RN)) {
        fpscr_set_rounding_mode();
    }
}
823

    
824
/* Set one FPSCR bit (mtfsb1 etc.).  Setting an exception bit also sets
 * FPSCR[FX]; setting an exception bit while its enable is on, or setting
 * an enable while the matching exception bit is pending, records a
 * deferred program exception.  Setting a rounding-control bit refreshes
 * the softfloat rounding mode.
 *
 * Fix: the FPSCR_VX case was missing its "break" and fell through into
 * the FPSCR_OX case, which could raise a spurious overflow exception
 * (and redundantly re-set FX) when VE was clear.
 */
void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            /* Any invalid-operation sub-bit also sets the VX summary */
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                /* Collect all pending invalid-operation causes */
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
938

    
939
/* mtfsf: store into FPSCR under a per-nibble mask.  Only the 32 LSB of
 * the incoming FPR are used; FEX and VX (bits 0x60000000) are not written
 * directly but recomputed from the exception and enable bits afterwards.
 */
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    /* Keep the current FEX/VX summary bits; they are derived below */
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode();
}
972

    
973
void helper_float_check_status (void)
974
{
975
#ifdef CONFIG_SOFTFLOAT
976
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
977
        (env->error_code & POWERPC_EXCP_FP)) {
978
        /* Differred floating-point exception after target FPR update */
979
        if (msr_fe0 != 0 || msr_fe1 != 0)
980
            helper_raise_exception_err(env->exception_index, env->error_code);
981
    } else {
982
        int status = get_float_exception_flags(&env->fp_status);
983
        if (status & float_flag_divbyzero) {
984
            float_zero_divide_excp();
985
        } else if (status & float_flag_overflow) {
986
            float_overflow_excp();
987
        } else if (status & float_flag_underflow) {
988
            float_underflow_excp();
989
        } else if (status & float_flag_inexact) {
990
            float_inexact_excp();
991
        }
992
    }
993
#else
994
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
995
        (env->error_code & POWERPC_EXCP_FP)) {
996
        /* Differred floating-point exception after target FPR update */
997
        if (msr_fe0 != 0 || msr_fe1 != 0)
998
            helper_raise_exception_err(env->exception_index, env->error_code);
999
    }
1000
#endif
1001
}
1002

    
1003
#ifdef CONFIG_SOFTFLOAT
1004
void helper_reset_fpstatus (void)
1005
{
1006
    set_float_exception_flags(0, &env->fp_status);
1007
}
1008
#endif
1009

    
1010
/* fadd - fadd.
 * Double-precision add with PowerPC invalid-operation detection. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities: +inf + -inf is invalid (VXISI) */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN addition */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1033

    
1034
/* fsub - fsub.
 * Double-precision subtract with PowerPC invalid-operation detection. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities: inf - inf of same sign (VXISI) */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN subtraction */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1057

    
1058
/* fmul - fmul.
 * Double-precision multiply with PowerPC invalid-operation detection. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity (VXIMZ) */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN multiplication */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1081

    
1082
/* fdiv - fdiv.
 * Double-precision divide with PowerPC invalid-operation detection. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity (VXIDI) */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero (VXZDZ) */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN division */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1107

    
1108
/* fabs */
1109
uint64_t helper_fabs (uint64_t arg)
1110
{
1111
    CPU_DoubleU farg;
1112

    
1113
    farg.ll = arg;
1114
    farg.d = float64_abs(farg.d);
1115
    return farg.ll;
1116
}
1117

    
1118
/* fnabs */
1119
uint64_t helper_fnabs (uint64_t arg)
1120
{
1121
    CPU_DoubleU farg;
1122

    
1123
    farg.ll = arg;
1124
    farg.d = float64_abs(farg.d);
1125
    farg.d = float64_chs(farg.d);
1126
    return farg.ll;
1127
}
1128

    
1129
/* fneg */
1130
uint64_t helper_fneg (uint64_t arg)
1131
{
1132
    CPU_DoubleU farg;
1133

    
1134
    farg.ll = arg;
1135
    farg.d = float64_chs(farg.d);
1136
    return farg.ll;
1137
}
1138

    
1139
/* fctiw - fctiw.
 * Convert double to 32-bit signed integer using the current rounding mode. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}
1160

    
1161
/* fctiwz - fctiwz.
 * Convert double to 32-bit signed integer, always rounding toward zero. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}
1182

    
1183
#if defined(TARGET_PPC64)
1184
/* fcfid - fcfid. */
1185
uint64_t helper_fcfid (uint64_t arg)
1186
{
1187
    CPU_DoubleU farg;
1188
    farg.d = int64_to_float64(arg, &env->fp_status);
1189
    return farg.ll;
1190
}
1191

    
1192
/* fctid - fctid. */
1193
uint64_t helper_fctid (uint64_t arg)
1194
{
1195
    CPU_DoubleU farg;
1196
    farg.ll = arg;
1197

    
1198
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1199
        /* sNaN conversion */
1200
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1201
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1202
        /* qNan / infinity conversion */
1203
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1204
    } else {
1205
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
1206
    }
1207
    return farg.ll;
1208
}
1209

    
1210
/* fctidz - fctidz. */
1211
uint64_t helper_fctidz (uint64_t arg)
1212
{
1213
    CPU_DoubleU farg;
1214
    farg.ll = arg;
1215

    
1216
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1217
        /* sNaN conversion */
1218
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1219
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
1220
        /* qNan / infinity conversion */
1221
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1222
    } else {
1223
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
1224
    }
1225
    return farg.ll;
1226
}
1227

    
1228
#endif
1229

    
1230
/* Common implementation of the frin/friz/frip/frim round-to-integer
 * instructions: round 'arg' to an integral double using 'rounding_mode',
 * then restore the rounding mode selected by the FPSCR. */
static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}
1249

    
1250
/* frin: round to integer, ties to even. */
uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

/* friz: round to integer toward zero. */
uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

/* frip: round to integer toward +infinity. */
uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

/* frim: round to integer toward -infinity. */
uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
1269

    
1270
/* fmadd - fmadd.
 * Fused multiply-add: (arg1 * arg2) + arg3 with a single rounding,
 * implemented via a 128-bit intermediate as the spec describes. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity (VXIMZ) */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities (VXISI) */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }

    return farg1.ll;
}
1309

    
1310
/* fmsub - fmsub.
 * Fused multiply-subtract: (arg1 * arg2) - arg3 with a single rounding,
 * implemented via a 128-bit intermediate as the spec describes. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity (VXIMZ) */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities (VXISI) */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }
    return farg1.ll;
}
1348

    
1349
/* fnmadd - fnmadd.
 * Negated fused multiply-add: -((arg1 * arg2) + arg3).
 * The final negation is skipped for NaN results (NaNs keep their sign). */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity (VXIMZ) */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities (VXISI) */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}
1390

    
1391
/* fnmsub - fnmsub.
 * Negated fused multiply-subtract: -((arg1 * arg2) - arg3).
 * The final negation is skipped for NaN results (NaNs keep their sign). */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity (VXIMZ) */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities (VXISI) */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}
1432

    
1433
/* frsp - frsp.
 * Round a double-precision value to single precision (result is still
 * stored in double format). */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN rounding to single precision */
       fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
    /* Round by narrowing to float32 and widening back */
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
1449

    
1450
/* fsqrt - fsqrt.
 * Double-precision square root; negative nonzero input is invalid (VXSQRT). */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN square root */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1468

    
1469
/* fre - fre. */
1470
uint64_t helper_fre (uint64_t arg)
1471
{
1472
    CPU_DoubleU farg;
1473
    farg.ll = arg;
1474

    
1475
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1476
        /* sNaN reciprocal */
1477
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1478
    }
1479
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1480
    return farg.d;
1481
}
1482

    
1483
/* fres - fres.
 * Single-precision reciprocal estimate: compute 1.0/x in double precision,
 * then round the result to single precision. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    /* Round to single precision via a float32 round trip */
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
1500

    
1501
/* frsqrte  - frsqrte.
 * Reciprocal square root estimate: 1.0/sqrt(x) rounded to single precision;
 * negative nonzero input is invalid (VXSQRT). */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN reciprocal square root */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        /* Round the estimate to single precision via a float32 round trip */
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
1523

    
1524
/* fsel - fsel. */
1525
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1526
{
1527
    CPU_DoubleU farg1;
1528

    
1529
    farg1.ll = arg1;
1530

    
1531
    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_any_nan(farg1.d)) {
1532
        return arg2;
1533
    } else {
1534
        return arg3;
1535
    }
1536
}
1537

    
1538
/* fcmpu: unordered floating-point compare.
 * Sets CR field crfD and FPSCR[FPRF] to LT/GT/EQ/UN; signals VXSNAN only
 * when an sNaN is involved (qNaN comparison is not invalid here). */
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;   /* unordered */
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;   /* less than */
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;   /* greater than */
    } else {
        ret = 0x02UL;   /* equal */
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}
1566

    
1567
/* fcmpo: ordered floating-point compare.
 * Like fcmpu, but any NaN operand raises VXVC (plus VXSNAN for sNaN). */
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;   /* unordered */
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;   /* less than */
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;   /* greater than */
    } else {
        ret = 0x02UL;   /* equal */
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
1600

    
1601
#if !defined (CONFIG_USER_ONLY)
1602
/* mtmsr: store to the MSR.  hreg_store_msr returns a nonzero exception
 * number when the new MSR value requires one to be raised. */
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}
1610

    
1611
/* Common return-from-interrupt implementation.
 * nip: saved next instruction pointer; msr: saved MSR value;
 * msrm: mask of MSR bits restored from the saved value;
 * keep_msrh: on 32-bit mode of a 64-bit CPU, preserve the current
 * high MSR bits instead of taking them from the saved value. */
static inline void do_rfi(target_ulong nip, target_ulong msr,
                          target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
1639

    
1640
/* rfi: return from interrupt using SRR0/SRR1. */
void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 1);
}
1645

    
1646
#if defined(TARGET_PPC64)
1647
void helper_rfid (void)
1648
{
1649
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1650
           ~((target_ulong)0x783F0000), 0);
1651
}
1652

    
1653
void helper_hrfid (void)
1654
{
1655
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1656
           ~((target_ulong)0x783F0000), 0);
1657
}
1658
#endif
1659
#endif
1660

    
1661
/* tw: trap word.  The 5 'flags' bits select the trap conditions:
 * 0x10 signed <, 0x08 signed >, 0x04 ==, 0x02 unsigned <, 0x01 unsigned >.
 * A TRAP program exception is raised when any selected condition holds. */
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1671

    
1672
#if defined(TARGET_PPC64)
1673
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
1674
{
1675
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1676
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1677
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1678
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1679
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
1680
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1681
}
1682
#endif
1683

    
1684
/*****************************************************************************/
1685
/* PowerPC 601 specific instructions (POWER bridge) */
1686

    
1687
/* clcs (POWER/601 bridge): cache line compute size.
 * Returns the cache geometry parameter selected by 'arg'; unrecognized
 * selectors return 0 (undefined per the architecture). */
target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
    default:
        /* Undefined */
        return 0;
    }
}
1714

    
1715
/* div (POWER bridge): 64-bit dividend is arg1:MQ; quotient is returned
 * and the remainder goes to MQ.  Overflow or divide-by-zero yields
 * INT32_MIN with MQ cleared. */
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return  tmp / (int32_t)arg2;
    }
}
1728

    
1729
/* divo (POWER bridge): like helper_div, but also updates XER[OV]/XER[SO]
 * when the quotient does not fit in 32 bits or the division is invalid. */
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        /* Overflow if the quotient does not fit in 32 bits */
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}
1749

    
1750
/* divs (POWER bridge): 32-bit signed divide; remainder goes to MQ.
 * Overflow (INT32_MIN / -1) or divide-by-zero yields INT32_MIN with MQ
 * cleared. */
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    int32_t dividend = (int32_t)arg1;
    int32_t divisor = (int32_t)arg2;

    if (divisor == 0 || (dividend == INT32_MIN && divisor == (int32_t)-1)) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->spr[SPR_MQ] = dividend % divisor;
    return dividend / divisor;
}
1761

    
1762
/* divso (POWER bridge): like helper_divs, but also updates XER[OV]/XER[SO]
 * on invalid division. */
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    int32_t dividend = (int32_t)arg1;
    int32_t divisor = (int32_t)arg2;

    if (divisor == 0 || (dividend == INT32_MIN && divisor == (int32_t)-1)) {
        /* Invalid division: set overflow and summary overflow, clear MQ */
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->xer &= ~(1 << XER_OV);
    env->spr[SPR_MQ] = dividend % divisor;
    return dividend / divisor;
}
1775

    
1776
#if !defined (CONFIG_USER_ONLY)
1777
/* rac (POWER bridge): translate an effective address to a real address,
 * temporarily disabling BAT translation.  Returns 0 on translation failure. */
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    /* Restore the saved BAT count */
    env->nb_BATs = nb_BATs;
    return ret;
}
1794

    
1795
/* rfsvc (POWER bridge): return from supervisor call via LR/CTR. */
void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
1799
#endif
1800

    
1801
/*****************************************************************************/
1802
/* 602 specific instructions */
1803
/* mfrom is the most crazy instruction ever seen, imho ! */
1804
/* Real implementation uses a ROM table. Do the same */
1805
/* Extremly decomposed:
1806
 *                      -arg / 256
1807
 * return 256 * log10(10           + 1.0) + 0.5
1808
 */
1809
#if !defined (CONFIG_USER_ONLY)
1810
/* mfrom (602): table lookup of 256 * log10(10^(-arg/256) + 1.0) + 0.5,
 * mirroring the hardware ROM table; out-of-range inputs return 0. */
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
        /* Precomputed table generated at build time */
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
1819
#endif
1820

    
1821
/*****************************************************************************/
1822
/* Embedded PowerPC specific helpers */
1823

    
1824
/* XXX: to be improved to check access rights when in user-mode */
1825
/* Read Device Control Register 'dcrn'.  Raises a program exception when
 * no DCR environment exists or when the DCR read fails. */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    uint32_t val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}
1840

    
1841
/* Write 'val' to Device Control Register 'dcrn'.  Raises a program
 * exception when no DCR environment exists or when the write fails. */
void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
1853

    
1854
#if !defined(CONFIG_USER_ONLY)
1855
/* 40x rfci: return from critical interrupt using SRR2/SRR3. */
void helper_40x_rfci (void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}
1860

    
1861
void helper_rfci (void)
1862
{
1863
    do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1864
           ~((target_ulong)0x3FFF0000), 0);
1865
}
1866

    
1867
void helper_rfdi (void)
1868
{
1869
    do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1870
           ~((target_ulong)0x3FFF0000), 0);
1871
}
1872

    
1873
void helper_rfmci (void)
1874
{
1875
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1876
           ~((target_ulong)0x3FFF0000), 0);
1877
}
1878
#endif
1879

    
1880
/* 440 specific */
/* dlmzb: determine the length of a string held in the register pair
 * (high, low), terminated by the first zero byte.  Scans the four bytes
 * of HIGH most-significant first, then the four bytes of LOW.  Returns
 * the 1-based count of bytes preceding the zero byte (8 if none found)
 * and mirrors it into the low 7 bits of XER.  When update_Rc is set,
 * CR0 records where the zero byte was found:
 *   0x4 = in HIGH, 0x8 = in LOW, 0x2 = no zero byte; SO is OR'ed in. */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    /* Walk HIGH byte-by-byte from the most significant byte. */
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    /* No zero byte in HIGH: continue through LOW. */
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    /* Byte count goes into the low 7 bits of XER. */
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
1915

    
1916
/*****************************************************************************/
/* Altivec extension helpers */
/* HI_IDX/LO_IDX select the high/low half of an element pair as laid out
 * in host memory order. */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/* Iterate over the elements of vector r in PowerPC (big-endian) element
 * order regardless of host byte order; on little-endian hosts the host
 * array is traversed backwards. */
#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                \
    if (float32_is_any_nan(x)) {                                \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

/* NaN-propagating guards for one, two or three float32 operands: the
 * statement following the macro runs only if no operand is a NaN. */
#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
1950

    
1951
/* Saturating arithmetic helpers.  */
/* SATCVT generates cvt<from><to>() which clamps a signed source value to
 * [min, max] and sets *sat on saturation; SATCVTU is the variant for
 * unsigned sources (no lower-bound check needed). */
#define SATCVT(from, to, from_type, to_type, min, max)                  \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x < (from_type)min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (x > (from_type)max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
#define SATCVTU(from, to, from_type, to_type, min, max)                 \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x > (from_type)max) {                                       \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
/* Signed -> narrower signed. */
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

/* Unsigned -> narrower unsigned. */
SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
/* Signed -> unsigned: clamps negatives to 0 via the min check. */
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU
1991

    
1992
/* LVE generates the lvebx/lvehx/lvewx "load vector element" helpers:
 * load one element from ADDR into the element slot selected by the
 * low address bits, byte-swapping when MSR[LE] is set. */
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if(msr_le) {                                            \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
#define I(x) (x)  /* identity "swap" for byte-sized accesses */
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE
2011

    
2012
/* lvsl: build the permute control vector for shift-left — consecutive
 * byte values starting at (sh & 0xf), in PowerPC element order. */
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}
2020

    
2021
/* lvsr: build the permute control vector for shift-right — consecutive
 * byte values starting at 16 - (sh & 0xf), in PowerPC element order. */
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}
2029

    
2030
#define STVE(name, access, swap, element)                       \
2031
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
2032
    {                                                           \
2033
        size_t n_elems = ARRAY_SIZE(r->element);                \
2034
        int adjust = HI_IDX*(n_elems-1);                        \
2035
        int sh = sizeof(r->element[0]) >> 1;                    \
2036
        int index = (addr & 0xf) >> sh;                         \
2037
        if(msr_le) {                                            \
2038
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2039
        } else {                                                        \
2040
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2041
        }                                                               \
2042
    }
2043
#define I(x) (x)
2044
STVE(stvebx, stb, I, u8)
2045
STVE(stvehx, stw, bswap16, u16)
2046
STVE(stvewx, stl, bswap32, u32)
2047
#undef I
2048
#undef LVE
2049

    
2050
/* mtvscr: copy the low word of the source vector (word 3 in PowerPC
 * order) into VSCR and propagate the vscr_nj bit into the vector
 * softfloat flush-to-zero mode. */
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}
2059

    
2060
/* vaddcuw: for each 32-bit lane, store the carry out (0 or 1) of the
 * unsigned addition a + b. */
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int lane;
    for (lane = 0; lane < ARRAY_SIZE(r->u32); lane++) {
        /* Unsigned add wraps iff the wrapped sum is below an operand. */
        uint32_t sum = a->u32[lane] + b->u32[lane];
        r->u32[lane] = (sum < a->u32[lane]);
    }
}
2067

    
2068
/* VARITH generates the modulo (wrapping) element-wise add/sub helpers
 * for unsigned byte/halfword/word lanes. */
#define VARITH_DO(name, op, element)        \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
{                                                                       \
    int i;                                                              \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
        r->element[i] = a->element[i] op b->element[i];                 \
    }                                                                   \
}
#define VARITH(suffix, element)                  \
  VARITH_DO(add##suffix, +, element)             \
  VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH
2084

    
2085
/* VARITHFP generates element-wise float32 add/sub helpers; NaN operands
 * are quieted and propagated by HANDLE_NAN2 instead of being computed. */
#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP
2098

    
2099
/* Saturating element-wise add/sub: compute in a wider type, clamp with
 * the matching cvt* helper, and set VSCR[SAT] if anything saturated. */
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            /* NOTE: all three cases expand identically; the switch on  \
             * element size is vestigial. */                            \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)    \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)       \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)     \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
2137

    
2138
/* VAVG generates rounding-average helpers: (a + b + 1) >> 1 computed in
 * a wider type so the intermediate sum cannot overflow. */
#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
2156

    
2157
/* VCF generates vcfux/vcfsx: convert each (un)signed word to float32
 * and scale by 2^-uim. */
#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn (t, -uim, &env->vec_status);       \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF
2169

    
2170
/* Integer vector compares: each lane becomes all-ones on a true compare
 * and zero otherwise.  The "_dot" (record) variants also set CR6:
 * bit 3 = all lanes true, bit 1 = all lanes false. */
#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
            switch (sizeof (a->element[0])) {                           \
            case 4: r->u32[i] = result; break;                          \
            case 2: r->u16[i] = result; break;                          \
            case 1: r->u8[i] = result; break;                           \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP
2205

    
2206
/* Float vector compares via float32_compare_quiet: unordered lanes
 * (NaN operands) always compare false.  "gefp" is encoded as
 * rel != float_relation_less, i.e. equal-or-greater.  The "_dot"
 * variants set CR6 as in VCMP_DO. */
#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            uint32_t result;                                            \
            int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)           \
    VCMPFP_DO(suffix, compare, order, 0)         \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP
2239

    
2240
/* vcmpbfp core: bounds compare.  For each lane set bit 31 if a > b and
 * bit 30 if a < -b; both bits are set for unordered (NaN) lanes.  When
 * RECORD is set, CR6 bit 1 indicates all lanes were within bounds. */
static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                                    int record)
{
    int i;
    int all_in = 0;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here.  */
        } else {
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;
            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}
2263

    
2264
/* vcmpbfp: bounds compare without CR6 update. */
void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 0);
}
2268

    
2269
/* vcmpbfp.: bounds compare with CR6 update. */
void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 1);
}
2273

    
2274
/* VCT generates vctuxs/vctsxs: scale each float by 2^uim, convert to a
 * (un)signed word with round-to-zero and saturation, NaNs become 0.
 * The conversion goes through float64/int64 so the scaled value cannot
 * lose bits before the saturating narrow.  Sets VSCR[SAT] on clamp. */
#define VCT(suffix, satcvt, element)                                    \
    void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(float_round_to_zero, &s);               \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            if (float32_is_any_nan(b->f[i])) {                          \
                r->element[i] = 0;                                      \
            } else {                                                    \
                float64 t = float32_to_float64(b->f[i], &s);            \
                int64_t j;                                              \
                t = float64_scalbn(t, uim, &s);                         \
                j = float64_to_int64(t, &s);                            \
                r->element[i] = satcvt(j, &sat);                        \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
2299

    
2300
/* vmaddfp: fused-style multiply-add r = (a * c) + b per lane, with NaN
 * operands quieted and propagated by HANDLE_NAN3. */
void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end.  */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_add(t, bf, &env->vec_status);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
2317

    
2318
/* vmhaddshs: per halfword, take the high 17 bits of a*b (prod >> 15),
 * add c, and saturate to int16.  Sets VSCR[SAT] on clamp. */
void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2333

    
2334
/* vmhraddshs: like vmhaddshs but rounds the product by adding 0x4000
 * before the >> 15.  Sets VSCR[SAT] on clamp. */
void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2349

    
2350
/* VMINMAX generates element-wise integer min/max: keep a unless the
 * compare against b succeeds (">" gives min, "<" gives max). */
#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
  VMINMAX_DO(min##suffix, >, element)           \
  VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX
2373

    
2374
/* VMINMAXFP generates float min/max: pick rT when a < b (quiet compare),
 * else rF; NaN operands are quieted and propagated by HANDLE_NAN2. */
#define VMINMAXFP(suffix, rT, rF)                                       \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
                    r->f[i] = rT->f[i];                                 \
                } else {                                                \
                    r->f[i] = rF->f[i];                                 \
                }                                                       \
            }                                                           \
        }                                                               \
    }
VMINMAXFP(minfp, a, b)
VMINMAXFP(maxfp, b, a)
#undef VMINMAXFP
2391

    
2392
/* vmladduhm: per halfword, multiply a and b, add c, keep the low 16
 * bits (modulo arithmetic — the cast truncates). */
void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int lane;
    for (lane = 0; lane < ARRAY_SIZE(r->s16); lane++) {
        /* Operands promote to int, so the 32-bit product + addend is
         * computed exactly before truncation. */
        r->s16[lane] = (int16_t)(a->s16[lane] * b->s16[lane] + c->s16[lane]);
    }
}
2400

    
2401
/* VMRG generates the vector merge-high/merge-low helpers.  A temporary
 * result vector is built and copied at the end so r may alias a or b.
 * MRGHI/MRGLO flip with host endianness so that each helper interleaves
 * the correct (PowerPC-order) half of the operands. */
#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        for (i = 0; i < n_elems/2; i++) {                               \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
  VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO
2435

    
2436
/* vmsummbm: multiply signed bytes of a by unsigned bytes of b, sum each
 * group of four products into the corresponding word of c (modulo). */
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}
2449

    
2450
/* vmsumshm: multiply signed halfwords pairwise, sum each pair of
 * products into the corresponding word of c (modulo). */
void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
    }
}
2463

    
2464
/* vmsumshs: like vmsumshm but the word accumulation is done in 64 bits
 * and saturated to int32; sets VSCR[SAT] on clamp. */
void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2483

    
2484
/* vmsumubm: multiply unsigned bytes pairwise, sum each group of four
 * products into the corresponding word of c (modulo). */
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}
2497

    
2498
/* vmsumuhm: multiply unsigned halfwords pairwise, sum each pair of
 * products into the corresponding word of c (modulo). */
void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
    }
}
2511

    
2512
/* vmsumuhs: like vmsumuhm but the word accumulation is done in 64 bits
 * and saturated to uint32; sets VSCR[SAT] on clamp. */
void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2531

    
2532
/* VMUL generates the even/odd widening multiplies: each product element
 * is the full-width product of the even- (HI_IDX) or odd- (LO_IDX)
 * numbered pair of narrower source elements. */
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element) \
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
2553

    
2554
/* vnmsubfp: negative multiply-subtract r = -((a * c) - b) per lane,
 * with NaN operands quieted and propagated by HANDLE_NAN3. */
void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end.  */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_sub(t, bf, &env->vec_status);
            t = float64_chs(t);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
2572

    
2573
/* vperm: byte permute.  Each control byte in c selects one byte from
 * the 32-byte concatenation of a (selectors 0..15) and b (selectors
 * 16..31); only the low 5 bits of each selector are used.  A temporary
 * is built first so that r may alias any source operand.
 */
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t tmp;
    int i;

    VECTOR_FOR_INORDER_I (i, u8) {
        int sel = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int idx = sel & 0xf;
#else
        /* byte order within a vector is reversed on little-endian hosts */
        int idx = 15 - (sel & 0xf);
#endif
        tmp.u8[i] = (sel & 0x10) ? b->u8[idx] : a->u8[idx];
    }
    *r = tmp;
}
2592

    
2593
#if defined(HOST_WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
/* vpkpx: pack eight 32-bit pixels (from a then b, in PPC element order)
 * into eight 16-bit pixels, keeping the top bits of each byte lane
 * (1/5/5/5 layout).  The x[] pair orders the sources per host
 * endianness so VECTOR_FOR_INORDER_I walks them in PPC order.
 */
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(HOST_WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I (i, u64) {
        VECTOR_FOR_INORDER_I (j, u32){
            uint32_t e = x[i]->u32[j];
            /* shift each channel down so its upper bits land in the
             * packed 16-bit field */
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}
2618

    
2619
/* VPK: vector pack.  Narrows the "from"-typed elements of a then b
 * (PKBIG picks which operand supplies the first half in PPC order)
 * into one vector of "to"-typed elements via cvt().  The *s variants
 * use a saturating cvt and set VSCR[SAT] when any element saturated
 * (dosat=1); the modulo (*m) variants truncate via the identity
 * macro I and never touch VSCR.
 */
#define VPK(suffix, from, to, cvt, dosat)       \
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
        VECTOR_FOR_INORDER_I (i, from) {                                \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG
2648

    
2649
/* vrefp: per-element reciprocal "estimate", implemented exactly as
 * 1.0 / b[i].  NaN operands are filtered by HANDLE_NAN1.
 */
void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
        }
    }
}
2658

    
2659
/* VRFI: round-to-integral-float with a fixed rounding mode.  A local
 * copy of vec_status is used so forcing the rounding mode does not
 * leak into env->vec_status for later operations.
 */
#define VRFI(suffix, rounding)                                          \
    void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(rounding, &s);                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN1(r->f[i], b->f[i]) {                             \
                r->f[i] = float32_round_to_int (b->f[i], &s);           \
            }                                                           \
        }                                                               \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI
2676

    
2677
#define VROTATE(suffix, element)                                        \
2678
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2679
    {                                                                   \
2680
        int i;                                                          \
2681
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2682
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2683
            unsigned int shift = b->element[i] & mask;                  \
2684
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2685
        }                                                               \
2686
    }
2687
VROTATE(b, u8)
2688
VROTATE(h, u16)
2689
VROTATE(w, u32)
2690
#undef VROTATE
2691

    
2692
/* vrsqrtefp: per-element reciprocal-square-root "estimate", computed
 * exactly as 1.0 / sqrt(b[i]).  NaN operands are filtered by
 * HANDLE_NAN1.
 */
void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            float32 t = float32_sqrt(b->f[i], &env->vec_status);
            r->f[i] = float32_div(float32_one, t, &env->vec_status);
        }
    }
}
2702

    
2703
/* vsel: bitwise select — for each bit, take b where the corresponding
 * bit of c is set, otherwise a.  The masks are read up front so r may
 * alias c.
 */
void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint64_t m0 = c->u64[0];
    uint64_t m1 = c->u64[1];

    r->u64[0] = (b->u64[0] & m0) | (a->u64[0] & ~m0);
    r->u64[1] = (b->u64[1] & m1) | (a->u64[1] & ~m1);
}
2708

    
2709
/* vexptefp: per-element 2**x "estimate", computed with softfloat's
 * float32_exp2.  NaN operands are filtered by HANDLE_NAN1.
 */
void helper_vexptefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_exp2(b->f[i], &env->vec_status);
        }
    }
}
2718

    
2719
/* vlogefp: per-element log2(x) "estimate", computed with softfloat's
 * float32_log2.  NaN operands are filtered by HANDLE_NAN1.
 */
void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_log2(b->f[i], &env->vec_status);
        }
    }
}
2728

    
2729
#if defined(HOST_WORDS_BIGENDIAN)
#define LEFT 0
#define RIGHT 1
#else
#define LEFT 1
#define RIGHT 0
#endif
/* The specification says that the results are undefined if all of the
 * shift counts are not identical.  We check to make sure that they are
 * to conform to what real hardware appears to do.  */
/* VSHIFT: whole-vector shift (vsl/vsr) by 0-7 bits.  The count is read
 * from the low 3 bits of byte 15 (PPC numbering); r is only written
 * when every byte agrees on the count.  The shift == 0 case is handled
 * separately, which also avoids a 64-bit shift by 64 below.
 */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
        int doit = 1;                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                *r = *a;                                                \
            } else if (leftp) {                                         \
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
            }                                                           \
        }                                                               \
    }
VSHIFT(l, LEFT)
VSHIFT(r, RIGHT)
#undef VSHIFT
#undef LEFT
#undef RIGHT
2767

    
2768
/* VSL: per-element shift left.  The mask expression evaluates to
 * 7/15/31 for 1/2/4-byte elements, so the count is taken modulo the
 * element width.
 */
#define VSL(suffix, element)                                            \
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL
2782

    
2783
/* vsldoi: shift the 32-byte concatenation a:b left by sh bytes and
 * keep the high 16 bytes.  The two loops are mirror images because the
 * in-register byte order differs between big- and little-endian hosts.
 */
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

#if defined(HOST_WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index-0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index-0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    /* result is a temporary so r may alias a or b */
    *r = result;
}
2810

    
2811
/* vslo: shift the whole vector left by 0-15 bytes; the count comes
 * from bits 3-6 of byte 15 (PPC numbering).  memmove is used because
 * r may alias a; vacated bytes are zeroed.
 */
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#else
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#endif
}
2823

    
2824
/* Experimental testing shows that hardware masks the immediate.  */
/* VSPLT: broadcast element SPLAT_ELEMENT of b into every element of r.
 * SPLAT_ELEMENT converts the PPC element number in the immediate into
 * the host-endian array index.
 */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(HOST_WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
#endif
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED
2846

    
2847
/* VSPLTI: broadcast a sign-extended 5-bit immediate into every
 * element.  The (int8_t)(splat << 3) >> 3 dance sign-extends the low
 * 5 bits of the immediate through the int8_t cast.
 */
#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI
2860

    
2861
/* VSR: per-element shift right; the count is masked to the element
 * width as in VSL.  The a* variants instantiate with signed element
 * types so the >> is an arithmetic (sign-propagating) shift.
 */
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR
2878

    
2879
/* vsro: shift the whole vector right by 0-15 bytes (mirror image of
 * helper_vslo); the count comes from bits 3-6 of byte 15.  memmove is
 * used because r may alias a; vacated bytes are zeroed.
 */
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#else
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#endif
}
2891

    
2892
/* vsubcuw: per-word carry-out of a - b, i.e. 1 when the unsigned
 * subtraction does not borrow (a >= b), else 0.
 */
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = (a->u32[i] < b->u32[i]) ? 0 : 1;
    }
}
2899

    
2900
/* vsumsws: saturating sum of all four signed words of a plus the last
 * word of b; the saturated result lands in the last word (PPC element
 * 3, hence the endian-dependent "upper" index), the rest are zeroed.
 * VSCR[SAT] is set if the 64-bit sum saturated to 32 bits.
 */
void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

/* vsum2sws: for each 64-bit half, saturating sum of the two signed
 * words of a in that half plus the odd word of b; result stored in the
 * odd word of the half, the even word zeroed.  Sets VSCR[SAT] on
 * saturation.  (The inner loop bound ARRAY_SIZE(r->u64) == 2 is the
 * number of words per half.)
 */
void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper+i*2];
        result.u64[i] = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2*i+j];
        }
        result.s32[upper+i*2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2950

    
2951
/* vsum4sbs: per word, saturating sum of the four signed bytes of a in
 * that word plus the corresponding signed word of b.  Sets VSCR[SAT]
 * if any element saturated.
 */
void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4*i+j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

/* vsum4shs: per word, saturating sum of the two signed halfwords of a
 * in that word plus the corresponding signed word of b.  Sets
 * VSCR[SAT] if any element saturated.
 */
void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        t += a->s16[2*i] + a->s16[2*i+1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

/* vsum4ubs: per word, unsigned saturating sum of the four bytes of a
 * in that word plus the corresponding unsigned word of b.  Sets
 * VSCR[SAT] if any element saturated.
 */
void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4*i+j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
3002

    
3003
#if defined(HOST_WORDS_BIGENDIAN)
#define UPKHI 1
#define UPKLO 0
#else
#define UPKHI 0
#define UPKLO 1
#endif
/* VUPKPX: unpack four 16-bit 1/5/5/5 pixels (high or low half of b)
 * into four 32-bit pixels; the 1-bit alpha is replicated to 8 bits,
 * each 5-bit channel is zero-extended to a byte.
 * NOTE: the macro locals r and b deliberately shadow the function
 * parameters inside the loop body; only "result" escapes, so this is
 * harmless but easy to misread.
 */
#define VUPKPX(suffix, hi)                                      \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
    {                                                           \
        int i;                                                  \
        ppc_avr_t result;                                       \
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
            uint16_t e = b->u16[hi ? i : i+4];                  \
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
            uint8_t r = (e >> 10) & 0x1f;                       \
            uint8_t g = (e >> 5) & 0x1f;                        \
            uint8_t b = e & 0x1f;                               \
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
        }                                                               \
        *r = result;                                                    \
    }
VUPKPX(lpx, UPKLO)
VUPKPX(hpx, UPKHI)
#undef VUPKPX
3028

    
3029
/* VUPK: sign-extending unpack.  The h* variants widen the first half
 * of b's "packee" elements, the l* variants the second half (hi is
 * UPKHI/UPKLO, already adjusted for host endianness).  A temporary is
 * used so r may alias b.
 */
#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO

/* end of the AltiVec helper section: drop its private macros */
#undef DO_HANDLE_NAN
#undef HANDLE_NAN1
#undef HANDLE_NAN2
#undef HANDLE_NAN3
#undef VECTOR_FOR_INORDER_I
#undef HI_IDX
#undef LO_IDX
3060

    
3061
/*****************************************************************************/
3062
/* SPE extension helpers */
3063
/* Use a table to make this quicker */
3064
/* hbrev[n] is the 4-bit bit-reversal of n. */
static uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

/* Reverse the bit order of an 8-bit value via two nibble lookups. */
static inline uint8_t byte_reverse(uint8_t val)
{
    uint8_t hi = val >> 4;
    uint8_t lo = val & 0xF;

    return hbrev[hi] | (hbrev[lo] << 4);
}

/* Reverse the bit order of a 32-bit value: reverse each byte and swap
 * the byte positions end-for-end. */
static inline uint32_t word_reverse(uint32_t val)
{
    uint32_t res = 0;
    int i;

    for (i = 0; i < 4; i++) {
        res |= (uint32_t)byte_reverse(val >> (24 - 8 * i)) << (8 * i);
    }
    return res;
}
3079

    
3080
#define MASKBITS 16 // Random value - to be fixed (implementation dependant)
/* brinc: bit-reversed increment, used for FFT-style addressing.  The
 * low MASKBITS bits of arg1 are incremented in bit-reversed order
 * (reverse, add 1 with ~b as carry blocker, reverse back), masked by
 * arg2; the upper bits of arg1 pass through unchanged.
 */
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}
3091

    
3092
/* Count leading sign bits: leading ones for a negative value, leading
 * zeros otherwise.
 */
uint32_t helper_cntlsw32 (uint32_t val)
{
    if (val & 0x80000000)
        return clz32(~val);
    else
        return clz32(val);
}

/* Count leading zeros of a 32-bit value (thin wrapper around clz32). */
uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}
3104

    
3105
/* Single-precision floating-point conversions */
3106
/* efscfsi: convert a signed 32-bit integer to single-precision float,
 * returning the raw bit pattern.
 */
static inline uint32_t efscfsi(uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efscfui: convert an unsigned 32-bit integer to single-precision
 * float, returning the raw bit pattern.
 */
static inline uint32_t efscfui(uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}
3123

    
3124
static inline int32_t efsctsi(uint32_t val)
3125
{
3126
    CPU_FloatU u;
3127

    
3128
    u.l = val;
3129
    /* NaN are not treated the same way IEEE 754 does */
3130
    if (unlikely(float32_is_quiet_nan(u.f)))
3131
        return 0;
3132

    
3133
    return float32_to_int32(u.f, &env->vec_status);
3134
}
3135

    
3136
static inline uint32_t efsctui(uint32_t val)
3137
{
3138
    CPU_FloatU u;
3139

    
3140
    u.l = val;
3141
    /* NaN are not treated the same way IEEE 754 does */
3142
    if (unlikely(float32_is_quiet_nan(u.f)))
3143
        return 0;
3144

    
3145
    return float32_to_uint32(u.f, &env->vec_status);
3146
}
3147

    
3148
static inline uint32_t efsctsiz(uint32_t val)
3149
{
3150
    CPU_FloatU u;
3151

    
3152
    u.l = val;
3153
    /* NaN are not treated the same way IEEE 754 does */
3154
    if (unlikely(float32_is_quiet_nan(u.f)))
3155
        return 0;
3156

    
3157
    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
3158
}
3159

    
3160
static inline uint32_t efsctuiz(uint32_t val)
3161
{
3162
    CPU_FloatU u;
3163

    
3164
    u.l = val;
3165
    /* NaN are not treated the same way IEEE 754 does */
3166
    if (unlikely(float32_is_quiet_nan(u.f)))
3167
        return 0;
3168

    
3169
    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
3170
}
3171

    
3172
/* efscfsf: convert a signed 32-bit fractional value to single
 * precision: convert as an integer, then divide by 2^32.
 */
static inline uint32_t efscfsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efscfuf: convert an unsigned 32-bit fractional value to single
 * precision: convert as an integer, then divide by 2^32.
 */
static inline uint32_t efscfuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}
3195

    
3196
static inline uint32_t efsctsf(uint32_t val)
3197
{
3198
    CPU_FloatU u;
3199
    float32 tmp;
3200

    
3201
    u.l = val;
3202
    /* NaN are not treated the same way IEEE 754 does */
3203
    if (unlikely(float32_is_quiet_nan(u.f)))
3204
        return 0;
3205
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3206
    u.f = float32_mul(u.f, tmp, &env->vec_status);
3207

    
3208
    return float32_to_int32(u.f, &env->vec_status);
3209
}
3210

    
3211
static inline uint32_t efsctuf(uint32_t val)
3212
{
3213
    CPU_FloatU u;
3214
    float32 tmp;
3215

    
3216
    u.l = val;
3217
    /* NaN are not treated the same way IEEE 754 does */
3218
    if (unlikely(float32_is_quiet_nan(u.f)))
3219
        return 0;
3220
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3221
    u.f = float32_mul(u.f, tmp, &env->vec_status);
3222

    
3223
    return float32_to_uint32(u.f, &env->vec_status);
3224
}
3225

    
3226
/* HELPER_SPE_SINGLE_CONV: expose each scalar SPE conversion inline
 * above as a public helper_e<name> entry point.
 */
#define HELPER_SPE_SINGLE_CONV(name)                                          \
uint32_t helper_e##name (uint32_t val)                                        \
{                                                                             \
    return e##name(val);                                                      \
}
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
3251

    
3252
/* HELPER_SPE_VECTOR_CONV: vector form of the SPE conversions — apply
 * the scalar conversion independently to the high and low 32-bit
 * halves of the 64-bit operand.
 */
#define HELPER_SPE_VECTOR_CONV(name)                                          \
uint64_t helper_ev##name (uint64_t val)                                       \
{                                                                             \
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
            (uint64_t)e##name(val);                                           \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
3278

    
3279
/* Single-precision floating-point arithmetic */
3280
static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
3281
{
3282
    CPU_FloatU u1, u2;
3283
    u1.l = op1;
3284
    u2.l = op2;
3285
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3286
    return u1.l;
3287
}
3288

    
3289
static inline uint32_t efssub(uint32_t op1, uint32_t op2)
3290
{
3291
    CPU_FloatU u1, u2;
3292
    u1.l = op1;
3293
    u2.l = op2;
3294
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3295
    return u1.l;
3296
}
3297

    
3298
static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
3299
{
3300
    CPU_FloatU u1, u2;
3301
    u1.l = op1;
3302
    u2.l = op2;
3303
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3304
    return u1.l;
3305
}
3306

    
3307
static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
3308
{
3309
    CPU_FloatU u1, u2;
3310
    u1.l = op1;
3311
    u2.l = op2;
3312
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3313
    return u1.l;
3314
}
3315

    
3316
/* HELPER_SPE_SINGLE_ARITH: expose each scalar SPE arithmetic inline
 * above as a public helper_e<name> entry point.
 */
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);
3329

    
3330
/* HELPER_SPE_VECTOR_ARITH: vector form — apply the scalar operation
 * independently to the high and low 32-bit halves of each operand.
 */
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
            (uint64_t)e##name(op1, op2);                                      \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
3344

    
3345
/* Single-precision floating-point comparisons */
3346
/* efscmplt: 4 if op1 < op2, else 0 (4 is the CR-field bit the SPE
 * compares set).  float32_lt is false for unordered operands, so a NaN
 * yields 0 here.
 */
static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

/* efscmpgt: 4 if NOT (op1 <= op2), else 0.
 * NOTE(review): because this is !le rather than gt, an unordered (NaN)
 * comparison yields 4 here but 0 in efscmplt/efscmpeq — confirm this
 * asymmetry against the SPE PEM. */
static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

/* efscmpeq: 4 if op1 == op2 (IEEE equality, unordered -> 0), else 0. */
static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}
3369

    
3370
/* efststlt: "test" variant of efscmplt; architecturally it should not
 * raise exceptions on special values, but currently just forwards. */
static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmplt(op1, op2);
}

/* efststgt: "test" variant of efscmpgt (see efststlt). */
static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpgt(op1, op2);
}

/* efststeq: "test" variant of efscmpeq (see efststlt). */
static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpeq(op1, op2);
}
3387

    
3388
/* HELPER_SINGLE_SPE_CMP: expose the scalar compare/test inlines as
 * public helpers.
 *
 * Fix: the efscmp / efstst inlines already return the result in its
 * final CR-field position (4, i.e. 0b0100, when the relation holds,
 * else 0).  The previous "<< 2" scaled that value a second time,
 * producing 16, which does not fit in a 4-bit condition-register
 * field and so corrupted the recorded compare result.
 */
#define HELPER_SINGLE_SPE_CMP(name)                                           \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
3405

    
3406
/* Merge the per-word comparison outcomes t0 (high word) and t1 (low
 * word) into one field: bit3 = t0, bit2 = t1, bit1 = t0|t1,
 * bit0 = t0&t1 (for boolean 0/1 operands).
 */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    int either = t0 | t1;
    int both = t0 & t1;

    return (t0 << 3) | (t1 << 2) | (either << 1) | both;
}
3410

    
3411
/* Emit a helper that runs a single-precision compare on both 32-bit
 * halves of each 64-bit operand and merges the results via evcmp_merge(). */
#define HELPER_VECTOR_SPE_CMP(name)                                           \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
3428

    
3429
/* Double-precision floating-point conversion */
3430
/* efdcfsi: convert a signed 32-bit integer to double precision. */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU res;

    res.d = int32_to_float64(val, &env->vec_status);
    return res.ll;
}
3438

    
3439
/* efdcfsid: convert a signed 64-bit integer to double precision. */
uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU res;

    res.d = int64_to_float64(val, &env->vec_status);
    return res.ll;
}
3447

    
3448
/* efdcfui: convert an unsigned 32-bit integer to double precision. */
uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU res;

    res.d = uint32_to_float64(val, &env->vec_status);
    return res.ll;
}
3456

    
3457
/* efdcfuid: convert an unsigned 64-bit integer to double precision. */
uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU res;

    res.d = uint64_to_float64(val, &env->vec_status);
    return res.ll;
}
3465

    
3466
/* efdctsi: convert double precision to a signed 32-bit integer. */
uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* Unlike IEEE 754, SPE converts NaN inputs to zero. */
    return unlikely(float64_is_any_nan(in.d))
        ? 0 : float64_to_int32(in.d, &env->vec_status);
}
3478

    
3479
/* efdctui: convert double precision to an unsigned 32-bit integer. */
uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* Unlike IEEE 754, SPE converts NaN inputs to zero. */
    return unlikely(float64_is_any_nan(in.d))
        ? 0 : float64_to_uint32(in.d, &env->vec_status);
}
3491

    
3492
/* efdctsiz: convert double precision to a signed 32-bit integer,
 * rounding toward zero. */
uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* Unlike IEEE 754, SPE converts NaN inputs to zero. */
    return unlikely(float64_is_any_nan(in.d))
        ? 0 : float64_to_int32_round_to_zero(in.d, &env->vec_status);
}
3504

    
3505
/* efdctsidz: convert double precision to a signed 64-bit integer,
 * rounding toward zero. */
uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* Unlike IEEE 754, SPE converts NaN inputs to zero. */
    return unlikely(float64_is_any_nan(in.d))
        ? 0 : float64_to_int64_round_to_zero(in.d, &env->vec_status);
}
3517

    
3518
/* efdctuiz: convert double precision to an unsigned 32-bit integer,
 * rounding toward zero. */
uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* Unlike IEEE 754, SPE converts NaN inputs to zero. */
    return unlikely(float64_is_any_nan(in.d))
        ? 0 : float64_to_uint32_round_to_zero(in.d, &env->vec_status);
}
3530

    
3531
/* efdctuidz: convert double precision to an unsigned 64-bit integer,
 * rounding toward zero. */
uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* Unlike IEEE 754, SPE converts NaN inputs to zero. */
    return unlikely(float64_is_any_nan(in.d))
        ? 0 : float64_to_uint64_round_to_zero(in.d, &env->vec_status);
}
3543

    
3544
/* efdcfsf: convert a signed 32-bit fractional value to double
 * precision (result = val / 2^32). */
uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU u;
    float64 scale;

    u.d = int32_to_float64(val, &env->vec_status);
    scale = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, scale, &env->vec_status);
    return u.ll;
}
3555

    
3556
/* efdcfuf: convert an unsigned 32-bit fractional value to double
 * precision (result = val / 2^32). */
uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU u;
    float64 scale;

    u.d = uint32_to_float64(val, &env->vec_status);
    scale = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, scale, &env->vec_status);
    return u.ll;
}
3567

    
3568
/* efdctsf: convert double precision to a signed 32-bit fractional
 * value (scaled by 2^32 before the integer conversion). */
uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU u;
    float64 scale;

    u.ll = val;
    /* Unlike IEEE 754, SPE converts NaN inputs to zero. */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    scale = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, scale, &env->vec_status);
    return float64_to_int32(u.d, &env->vec_status);
}
3583

    
3584
/* efdctuf: convert double precision to an unsigned 32-bit fractional
 * value (scaled by 2^32 before the integer conversion). */
uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU u;
    float64 scale;

    u.ll = val;
    /* Unlike IEEE 754, SPE converts NaN inputs to zero. */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    scale = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, scale, &env->vec_status);
    return float64_to_uint32(u.d, &env->vec_status);
}
3599

    
3600
/* efscfd: narrow a double-precision value to single precision. */
uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU in;
    CPU_FloatU out;

    in.ll = val;
    out.f = float64_to_float32(in.d, &env->vec_status);
    return out.l;
}
3610

    
3611
/* efdcfs: widen a single-precision value to double precision. */
uint64_t helper_efdcfs (uint32_t val)
{
    CPU_FloatU in;
    CPU_DoubleU out;

    in.l = val;
    out.d = float32_to_float64(in.f, &env->vec_status);
    return out.ll;
}
3621

    
3622
/* Double precision fixed-point arithmetic */
3623
/* efdadd: double-precision addition. */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_add(a.d, b.d, &env->vec_status);
    return a.ll;
}
3631

    
3632
/* efdsub: double-precision subtraction. */
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_sub(a.d, b.d, &env->vec_status);
    return a.ll;
}
3640

    
3641
/* efdmul: double-precision multiplication. */
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_mul(a.d, b.d, &env->vec_status);
    return a.ll;
}
3649

    
3650
/* efddiv: double-precision division. */
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_div(a.d, b.d, &env->vec_status);
    return a.ll;
}
3658

    
3659
/* Double precision floating point helpers */
3660
/* efdtstlt: double-precision less-than test. */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    return float64_lt(a.d, b.d, &env->vec_status) ? 4 : 0;
}
3667

    
3668
/* efdtstgt: double-precision greater-than test, expressed as !(a <= b). */
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    return float64_le(a.d, b.d, &env->vec_status) ? 0 : 4;
}
3675

    
3676
/* efdtsteq: double-precision equality test (quiet: does not signal
 * on NaN operands). */
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    return float64_eq_quiet(a.d, b.d, &env->vec_status) ? 4 : 0;
}
3683

    
3684
/* Full compare; should handle special values differently from the
 * "test" form, but currently aliases it. */
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(op1, op2);
}
3689

    
3690
/* Full compare; should handle special values differently from the
 * "test" form, but currently aliases it. */
uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(op1, op2);
}
3695

    
3696
/* Full compare; should handle special values differently from the
 * "test" form, but currently aliases it. */
uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(op1, op2);
}
3701

    
3702
/*****************************************************************************/
3703
/* Softmmu support */
3704
#if !defined (CONFIG_USER_ONLY)
3705

    
3706
#define MMUSUFFIX _mmu
3707

    
3708
#define SHIFT 0
3709
#include "softmmu_template.h"
3710

    
3711
#define SHIFT 1
3712
#include "softmmu_template.h"
3713

    
3714
#define SHIFT 2
3715
#include "softmmu_template.h"
3716

    
3717
#define SHIFT 3
3718
#include "softmmu_template.h"
3719

    
3720
/* try to fill the TLB and return an exception if error. If retaddr is
3721
   NULL, it means that the function was called in C code (i.e. not
3722
   from generated code or from helper.c) */
3723
/* XXX: fix it to restore all registers */
3724
/* Fill the QEMU softmmu TLB for a guest access that missed.  On failure
 * the MMU exception recorded in env by cpu_ppc_handle_mmu_fault() is
 * raised; if retaddr points into generated code, the CPU state is first
 * restored to the faulting instruction's state. */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        /* Propagate the exception set up by the MMU fault handler.
         * Note: this longjmps out, so 'env = saved_env' below only runs
         * on the success path. */
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
3751

    
3752
/* Segment registers load and store */
3753
/* mfsr/mfsrin: read a segment register.  64-bit MMU models delegate
 * to ppc_load_sr(); 32-bit models read the SR array directly. */
target_ulong helper_load_sr (target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        return ppc_load_sr(env, sr_num);
    }
#endif
    return env->sr[sr_num];
}
3761

    
3762
/* mtsr/mtsrin: write a segment register. */
void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}
3766

    
3767
/* SLB management */
3768
#if defined(TARGET_PPC64)
3769
/* slbmte: install an SLB entry; an invalid entry raises an
 * illegal-instruction program exception. */
void helper_store_slb (target_ulong rb, target_ulong rs)
{
    int rc = ppc_store_slb(env, rb, rs);

    if (rc < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
}
3775

    
3776
/* slbmfee: read the ESID half of an SLB entry; an invalid index raises
 * an illegal-instruction program exception. */
target_ulong helper_load_slb_esid (target_ulong rb)
{
    target_ulong esid;

    if (ppc_load_slb_esid(env, rb, &esid) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
    return esid;
}
3785

    
3786
/* slbmfev: read the VSID half of an SLB entry; an invalid index raises
 * an illegal-instruction program exception. */
target_ulong helper_load_slb_vsid (target_ulong rb)
{
    target_ulong vsid;

    if (ppc_load_slb_vsid(env, rb, &vsid) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
    return vsid;
}
3795

    
3796
/* slbia: invalidate all SLB entries. */
void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}
3800

    
3801
/* slbie: invalidate the SLB entry matching the given effective address. */
void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
3805

    
3806
#endif /* defined(TARGET_PPC64) */
3807

    
3808
/* TLB management */
3809
/* tlbia: invalidate all TLB entries. */
void helper_tlbia (void)
{
    ppc_tlb_invalidate_all(env);
}
3813

    
3814
/* tlbie: invalidate the TLB entry matching the given effective address. */
void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
3818

    
3819
/* Software driven TLBs management */
3820
/* PowerPC 602/603 software TLB load instructions helpers */
3821
/* Common implementation of the 602/603 tlbld/tlbli instructions: build a
 * TLB entry for new_EPN from the RPA/xCMP/xMISS SPRs set up by the
 * software TLB-miss handler, selecting the I- or D-side SPRs via is_code. */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    /* The replacement way is taken from SRR1 bit 17. */
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3843

    
3844
/* tlbld: load a data TLB entry (602/603). */
void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}
3848

    
3849
/* tlbli: load an instruction TLB entry (602/603). */
void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}
3853

    
3854
/* PowerPC 74xx software TLB load instructions helpers */
3855
/* Common implementation of the 74xx software TLB load instructions:
 * build a TLB entry for new_EPN from the PTELO/PTEHI/TLBMISS SPRs set up
 * by the software TLB-miss handler. */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    /* TLBMISS holds the miss address with the way index in its low 2 bits. */
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3872

    
3873
/* Load a data TLB entry (74xx). */
void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}
3877

    
3878
/* Load an instruction TLB entry (74xx). */
void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}
3882

    
3883
/* Decode a BookE TLB size field: page sizes start at 1 KiB and grow by
 * a factor of four per step (size 0 -> 1 KiB, 1 -> 4 KiB, ...). */
static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}
3887

    
3888
/* Encode a page size into the BookE TLB size field: the inverse of
 * booke_tlb_to_page_size(), i.e. the size value for which
 * 1 KiB << (2 * size) == page_size.  Returns -1 if page_size is not a
 * supported value.  Size fields above 0xA (1 GiB) correspond to pages
 * larger than 4 GiB and so are only reachable on 64-bit targets. */
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    for (size = 0; size <= 0xF; size++) {
        if (page_size == (1024ULL << (2 * size))) {
            return size;
        }
    }
    return -1;
}
3950

    
3951
/* Helpers for 4xx TLB management */
3952
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */
3953

    
3954
#define PPC4XX_TLBHI_V              0x00000040
3955
#define PPC4XX_TLBHI_E              0x00000020
3956
#define PPC4XX_TLBHI_SIZE_MIN       0
3957
#define PPC4XX_TLBHI_SIZE_MAX       7
3958
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
3959
#define PPC4XX_TLBHI_SIZE_SHIFT     7
3960
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007
3961

    
3962
#define PPC4XX_TLBLO_EX             0x00000200
3963
#define PPC4XX_TLBLO_WR             0x00000100
3964
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
3965
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00
3966

    
3967
/* tlbre (high word, 4xx): reassemble TLBHI from the stored entry and
 * copy the entry's PID into the PID SPR, as the instruction requires. */
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong val;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    val = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        val |= PPC4XX_TLBHI_V;
    }
    /* Fall back to the default size field if the stored page size does
     * not encode into the 4xx's 3-bit range. */
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    val |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    env->spr[SPR_40x_PID] = tlb->PID;
    return val;
}
3987

    
3988
/* tlbre (low word, 4xx): reassemble TLBLO (RPN plus EX/WR permission
 * bits) from the stored entry. */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong val;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    val = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        val |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        val |= PPC4XX_TLBLO_WR;
    }
    return val;
}
4004

    
4005
/* tlbwe (high word, 4xx): update EPN, page size, valid bit and PID of a
 * TLB entry.  Flushes the QEMU TLB pages covered by both the old and
 * the new mapping so stale translations cannot survive. */
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    /* Align the EPN to the entry's page size. */
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
}
4063

    
4064
/* tlbwe (low word, 4xx): update RPN, storage attributes and permission
 * bits of a TLB entry.  Read permission is always granted; execute and
 * write are taken from the EX/WR bits of val. */
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
4089

    
4090
/* tlbsx (4xx): search the TLB for an address under the current PID. */
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
4094

    
4095
/* PowerPC 440 TLB management */
4096
/* tlbwe (440): write one of the three words of a TLB entry.  Word 0
 * carries EPN/size/valid, word 1 the RPN, word 2 attributes and the
 * user/supervisor permission bits.  The QEMU TLB is flushed whenever a
 * change could invalidate existing translations. */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        /* Bit 8 of the value replaces the low attribute bit. */
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        /* The entry's PID comes from the low byte of MMUCR. */
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        /* Rebuild the permission bits, keeping only the valid flag.
         * User permissions are stored shifted left by 4. */
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}
4157

    
4158
/* tlbre (440): read back one of the three words of a TLB entry,
 * mirroring the layout written by helper_440_tlbwe().  Reading word 0
 * also deposits the entry's PID into the low byte of MMUCR. */
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        /* User permissions are stored shifted left by 4. */
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}
4203

    
4204
/* tlbsx (440): search the TLB for an address under the PID held in the
 * low byte of MMUCR. */
target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
4208

    
4209
#endif /* !CONFIG_USER_ONLY */