Statistics
| Branch: | Revision:

root / target-ppc / op_helper.c @ 2c0d18dd

History | View | Annotate | Download (133.9 kB)

1
/*
2
 *  PowerPC emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2003-2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <string.h>
20
#include "exec.h"
21
#include "host-utils.h"
22
#include "helper.h"
23

    
24
#include "helper_regs.h"
25

    
26
//#define DEBUG_OP
27
//#define DEBUG_EXCEPTIONS
28
//#define DEBUG_SOFTWARE_TLB
29

    
30
#ifdef DEBUG_SOFTWARE_TLB
31
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
32
#else
33
#  define LOG_SWTLB(...) do { } while (0)
34
#endif
35

    
36

    
37
/*****************************************************************************/
38
/* Exceptions processing helpers */
39

    
40
/* Raise a PowerPC exception with an associated error code and longjmp
 * back to the CPU main loop via cpu_loop_exit().  Does not return. */
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

/* Convenience wrapper: raise an exception with error code 0. */
void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
54

    
55
/*****************************************************************************/
56
/* SPR accesses */
57
/* Log a guest read of an SPR whose accesses are being traced. */
void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

/* Log a guest write of an SPR whose accesses are being traced. */
void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

/* Read the time base, low half (TBL). */
target_ulong helper_load_tbl (void)
{
    return (target_ulong)cpu_ppc_load_tbl(env);
}

/* Read the time base, high half (TBU). */
target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

/* Read the alternate time base, low half (ATBL). */
target_ulong helper_load_atbl (void)
{
    return (target_ulong)cpu_ppc_load_atbl(env);
}

/* Read the alternate time base, high half (ATBU). */
target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* Read the Processor Utilization of Resources Register (PURR). */
target_ulong helper_load_purr (void)
{
    return (target_ulong)cpu_ppc_load_purr(env);
}
#endif

/* Read the PowerPC 601 real-time clock, low register (RTCL). */
target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

/* Read the PowerPC 601 real-time clock, high register (RTCU). */
target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
105

    
106
#if !defined(CONFIG_USER_ONLY)
/* System-mode only: thin wrappers that forward SPR, time-base,
 * decrementer and BAT register updates to the corresponding
 * cpu_ppc_* / ppc_store_* implementations operating on the global env. */
#if defined (TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

/* PowerPC 601 HID0: bit 0x8 selects little-endian mode.  When that bit
 * changes, refresh the MSR_LE view cached in hflags/hflags_nmsr so
 * translated code picks up the new endianness. */
void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
                 val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

/* PowerPC 403 protection bound registers: a changed bound invalidates
 * cached translations, hence the full TLB flush.
 * NOTE(review): "likely(... != value)" annotates a *changed* value as
 * the expected case — confirm the hint is intentional. */
void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif
245

    
246
/*****************************************************************************/
247
/* Memory load and stores */
248

    
249
/* Compute addr + arg for guest string/multiple accesses, truncating
 * the result to 32 bits when a PPC64 CPU is not in 64-bit mode
 * (MSR[SF] clear). */
static inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (msr_sf) {
        return addr + arg;
    }
    return (uint32_t)(addr + arg);
#else
    return addr + arg;
#endif
}
258

    
259
/* lmw: load GPRs reg..31 from consecutive words starting at addr.
 * In little-endian mode the fetched words are byte-swapped. */
void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            env->gpr[reg] = bswap32(ldl(addr));
        else
            env->gpr[reg] = ldl(addr);
        addr = addr_add(addr, 4);
    }
}

/* stmw: store the low 32 bits of GPRs reg..31 to consecutive words
 * starting at addr, byte-swapped in little-endian mode. */
void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        else
            stl(addr, (uint32_t)env->gpr[reg]);
        addr = addr_add(addr, 4);
    }
}
280

    
281
/* lsw/lswi: load nb bytes from addr into GPRs starting at reg,
 * wrapping from r31 to r0.  A trailing partial word is loaded
 * left-justified into the register with the remainder zeroed. */
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}
297
/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        /* NOTE(review): this overlap test does not account for the
         * destination range wrapping past r31 back to r0 — confirm
         * against the ISA before relying on it. */
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
315

    
316
/* stsw/stswi: store nb bytes to addr from GPRs starting at reg,
 * wrapping from r31 to r0; a trailing partial word is taken from the
 * most significant bytes of the register. */
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}
331

    
332
/* Zero one data cache line of the given size at addr (dcbz core).
 * The address is first aligned down to the line boundary; any pending
 * lwarx/ldarx reservation on that line is cancelled. */
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        stl(addr + i, 0);
    }
    if (env->reserve_addr == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }
}
342

    
343
/* dcbz: zero one cache line using the CPU's configured line size. */
void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

/* dcbz on PPC 970: HID5[DCBZ_SIZE] == 1 forces a 32-byte line,
 * otherwise the normal line size is used. */
void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}
355

    
356
/* icbi: invalidate one instruction cache line. */
void helper_icbi(target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    ldl(addr);
}
366

    
367
// XXX: to be tested
/* lscbx (POWER/601): load string and compare byte indexed.  Load up to
 * XER[bc] bytes into successive register bytes starting at reg
 * (wrapping from r31 to r0), stopping after a byte equal to the XER
 * compare byte is loaded.  ra (if nonzero) and rb are skipped rather
 * than overwritten.  Returns the loop index at which it stopped. */
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;
    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            /* Register full: move to the next one, wrapping at r31. */
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
391

    
392
/*****************************************************************************/
393
/* Fixed point operations helpers */
394
#if defined(TARGET_PPC64)

/* multiply high word */
/* mulhd: high 64 bits of the signed 64x64 -> 128-bit product. */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
/* mulhdu: high 64 bits of the unsigned 64x64 -> 128-bit product. */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}
413

    
414
uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
415
{
416
    int64_t th;
417
    uint64_t tl;
418

    
419
    muls64(&tl, (uint64_t *)&th, arg1, arg2);
420
    /* If th != 0 && th != -1, then we had an overflow */
421
    if (likely((uint64_t)(th + 1) <= 1)) {
422
        env->xer &= ~(1 << XER_OV);
423
    } else {
424
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
425
    }
426
    return (int64_t)tl;
427
}
428
#endif
429

    
430
/* cntlzw: count leading zeros of the low 32 bits. */
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
/* cntlzd: count leading zeros of the full 64-bit value. */
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif
441

    
442
/* shift right arithmetic helper */
443
target_ulong helper_sraw (target_ulong value, target_ulong shift)
444
{
445
    int32_t ret;
446

    
447
    if (likely(!(shift & 0x20))) {
448
        if (likely((uint32_t)shift != 0)) {
449
            shift &= 0x1f;
450
            ret = (int32_t)value >> shift;
451
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
452
                env->xer &= ~(1 << XER_CA);
453
            } else {
454
                env->xer |= (1 << XER_CA);
455
            }
456
        } else {
457
            ret = (int32_t)value;
458
            env->xer &= ~(1 << XER_CA);
459
        }
460
    } else {
461
        ret = (int32_t)value >> 31;
462
        if (ret) {
463
            env->xer |= (1 << XER_CA);
464
        } else {
465
            env->xer &= ~(1 << XER_CA);
466
        }
467
    }
468
    return (target_long)ret;
469
}
470

    
471
#if defined(TARGET_PPC64)
/* srad: arithmetic right shift of the 64-bit value by shift[0:6].
 * XER[CA] is set iff the (negative) result is inexact, i.e. one-bits
 * were shifted out of a negative operand. */
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* The lost-bits mask must be built in 64-bit arithmetic:
             * the previous "(1 << shift) - 1" shifted an int, which is
             * undefined behavior for shift >= 31 and can never cover
             * bits 32..62, so CA was computed from the wrong mask. */
            if (likely(ret >= 0 ||
                       (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        /* Shift amount >= 64: result is all sign bits; CA is set iff
         * the source was negative. */
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif
500

    
501
#if defined(TARGET_PPC64)
/* popcntb: parallel bit-count — each byte of the result holds the
 * population count of the corresponding byte of val (classic
 * divide-and-conquer bit counting, stopped at 8-bit groups). */
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) &
                                           0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) &
                                           0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) &
                                           0x0f0f0f0f0f0f0f0fULL);
    return val;
}

/* popcntw: each 32-bit word of the result holds the population count
 * of the corresponding word of val (same scheme, stopped at 32 bits). */
target_ulong helper_popcntw (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) &
                                           0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) &
                                           0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) &
                                           0x0f0f0f0f0f0f0f0fULL);
    val = (val & 0x00ff00ff00ff00ffULL) + ((val >>  8) &
                                           0x00ff00ff00ff00ffULL);
    val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
                                           0x0000ffff0000ffffULL);
    return val;
}

/* popcntd: population count of the whole 64-bit value. */
target_ulong helper_popcntd (target_ulong val)
{
    return ctpop64(val);
}
#else
/* 32-bit variants of the same per-byte / per-word bit counts. */
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}

target_ulong helper_popcntw (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    val = (val & 0x00ff00ff) + ((val >>  8) & 0x00ff00ff);
    val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
    return val;
}
#endif
551

    
552
/*****************************************************************************/
553
/* Floating point operations helpers */
554
/* Widen a raw single-precision bit pattern to double precision using
 * the softfloat status in env. */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

/* Narrow a raw double-precision bit pattern to single precision using
 * the softfloat status (rounding mode, flags) in env. */
uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}
571

    
572
/* Return nonzero if d has an all-zero biased exponent, i.e. it is a
 * denormal (or zero — callers check for zero beforehand). */
static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}
580

    
581
/* Classify arg into the 5-bit FPRF result-class code (C bit plus FPCC)
 * defined by the PowerPC architecture.  When set_fprf is nonzero,
 * FPSCR[FPRF] is updated; the low 4 bits (FPCC) are returned so the
 * caller can mirror them into CR1 for Rc=1 forms. */
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_any_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            /* Fold in the sign: 0x08 = negative, 0x04 = positive. */
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
632

    
633
/* Floating-point invalid operations exception */
/* Record the FPSCR VX* bit for the given invalid-operation cause (op is
 * a POWERPC_EXCP_FP_VX* code), update the VX/FX summary bits, and raise
 * a program exception if invalid-operation exceptions are enabled
 * (FPSCR[VE]) and MSR[FE0]/MSR[FE1] select a precise FP mode.  Returns
 * the default result (a quiet NaN) for the arithmetic causes when the
 * exception is disabled, otherwise 0. */
static inline uint64_t fload_invalid_op_excp(int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred (raised later by
             * helper_float_check_status) */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
714

    
715
/* Zero-divide exception: set FPSCR[ZX]/[FX] and raise a program
 * exception when ZE is enabled and MSR[FE0]/[FE1] select precise mode. */
static inline void float_zero_divide_excp(void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

/* Overflow: set FPSCR[OX]/[FX]; when OE is enabled, arm a deferred
 * program exception (raised after the target FPR is updated),
 * otherwise record the inexact side effects (XX/FI). */
static inline void float_overflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

/* Underflow: set FPSCR[UX]/[FX]; when UE is enabled, arm a deferred
 * program exception (raised after the target FPR is updated). */
static inline void float_underflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

/* Inexact: set FPSCR[XX]/[FX]; when XE is enabled, arm a deferred
 * program exception (raised after the target FPR is updated). */
static inline void float_inexact_excp(void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
777

    
778
/* Propagate FPSCR[RN] (two-bit rounding-mode field) to the softfloat
 * rounding mode in env->fp_status. */
static inline void fpscr_set_rounding_mode(void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinite */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinite */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
804

    
805
/* mtfsb0: clear one FPSCR bit.  Only a transition of the rounding-mode
 * bits has an immediate side effect — resynchronize softfloat. */
void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}
822

    
823
void helper_fpscr_setbit (uint32_t bit)
824
{
825
    int prev;
826

    
827
    prev = (env->fpscr >> bit) & 1;
828
    env->fpscr |= 1 << bit;
829
    if (prev == 0) {
830
        switch (bit) {
831
        case FPSCR_VX:
832
            env->fpscr |= 1 << FPSCR_FX;
833
            if (fpscr_ve)
834
                goto raise_ve;
835
        case FPSCR_OX:
836
            env->fpscr |= 1 << FPSCR_FX;
837
            if (fpscr_oe)
838
                goto raise_oe;
839
            break;
840
        case FPSCR_UX:
841
            env->fpscr |= 1 << FPSCR_FX;
842
            if (fpscr_ue)
843
                goto raise_ue;
844
            break;
845
        case FPSCR_ZX:
846
            env->fpscr |= 1 << FPSCR_FX;
847
            if (fpscr_ze)
848
                goto raise_ze;
849
            break;
850
        case FPSCR_XX:
851
            env->fpscr |= 1 << FPSCR_FX;
852
            if (fpscr_xe)
853
                goto raise_xe;
854
            break;
855
        case FPSCR_VXSNAN:
856
        case FPSCR_VXISI:
857
        case FPSCR_VXIDI:
858
        case FPSCR_VXZDZ:
859
        case FPSCR_VXIMZ:
860
        case FPSCR_VXVC:
861
        case FPSCR_VXSOFT:
862
        case FPSCR_VXSQRT:
863
        case FPSCR_VXCVI:
864
            env->fpscr |= 1 << FPSCR_VX;
865
            env->fpscr |= 1 << FPSCR_FX;
866
            if (fpscr_ve != 0)
867
                goto raise_ve;
868
            break;
869
        case FPSCR_VE:
870
            if (fpscr_vx != 0) {
871
            raise_ve:
872
                env->error_code = POWERPC_EXCP_FP;
873
                if (fpscr_vxsnan)
874
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
875
                if (fpscr_vxisi)
876
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
877
                if (fpscr_vxidi)
878
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
879
                if (fpscr_vxzdz)
880
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
881
                if (fpscr_vximz)
882
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
883
                if (fpscr_vxvc)
884
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
885
                if (fpscr_vxsoft)
886
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
887
                if (fpscr_vxsqrt)
888
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
889
                if (fpscr_vxcvi)
890
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
891
                goto raise_excp;
892
            }
893
            break;
894
        case FPSCR_OE:
895
            if (fpscr_ox != 0) {
896
            raise_oe:
897
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
898
                goto raise_excp;
899
            }
900
            break;
901
        case FPSCR_UE:
902
            if (fpscr_ux != 0) {
903
            raise_ue:
904
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
905
                goto raise_excp;
906
            }
907
            break;
908
        case FPSCR_ZE:
909
            if (fpscr_zx != 0) {
910
            raise_ze:
911
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
912
                goto raise_excp;
913
            }
914
            break;
915
        case FPSCR_XE:
916
            if (fpscr_xx != 0) {
917
            raise_xe:
918
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
919
                goto raise_excp;
920
            }
921
            break;
922
        case FPSCR_RN1:
923
        case FPSCR_RN:
924
            fpscr_set_rounding_mode();
925
            break;
926
        default:
927
            break;
928
        raise_excp:
929
            /* Update the floating-point enabled exception summary */
930
            env->fpscr |= 1 << FPSCR_FEX;
931
                /* We have to update Rc1 before raising the exception */
932
            env->exception_index = POWERPC_EXCP_PROGRAM;
933
            break;
934
        }
935
    }
936
}
937

    
938
/* mtfsf: replace the FPSCR 4-bit fields selected by mask (one mask bit
 * per field) with the low 32 bits of arg, then recompute the VX and FEX
 * summary bits and resynchronize the softfloat rounding mode.  The
 * incoming FEX/VX bits (0x60000000) are ignored — the summaries are
 * always derived from the individual exception bits. */
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    }
    else
        env->fpscr &= ~(1 << FPSCR_FEX);
    fpscr_set_rounding_mode();
}
971

    
972
/* Commit pending floating-point exception state after an FP operation:
 * either deliver a deferred program exception armed earlier, or map the
 * accumulated softfloat flags onto the FPSCR exception helpers. */
void helper_float_check_status (void)
{
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
}
992

    
993
/* Clear the accumulated softfloat exception flags before an FP operation,
 * so helper_float_check_status() sees only the flags that op raised. */
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
997

    
998
/* fadd - fadd.
 * Double-precision add.  inf + (-inf) is an invalid operation (VXISI);
 * sNaN operands additionally raise VXSNAN before the add proceeds. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN addition */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1021

    
1022
/* fsub - fsub.
 * Double-precision subtract.  inf - inf with like signs is an invalid
 * operation (VXISI); sNaN operands raise VXSNAN. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN subtraction */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1045

    
1046
/* fmul - fmul.
 * Double-precision multiply.  0 * inf is an invalid operation (VXIMZ);
 * sNaN operands raise VXSNAN. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN multiplication */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1069

    
1070
/* fdiv - fdiv.
 * Double-precision divide.  inf/inf raises VXIDI, 0/0 raises VXZDZ;
 * sNaN operands raise VXSNAN. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN division */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1095

    
1096
/* fabs */
1097
uint64_t helper_fabs (uint64_t arg)
1098
{
1099
    CPU_DoubleU farg;
1100

    
1101
    farg.ll = arg;
1102
    farg.d = float64_abs(farg.d);
1103
    return farg.ll;
1104
}
1105

    
1106
/* fnabs */
1107
uint64_t helper_fnabs (uint64_t arg)
1108
{
1109
    CPU_DoubleU farg;
1110

    
1111
    farg.ll = arg;
1112
    farg.d = float64_abs(farg.d);
1113
    farg.d = float64_chs(farg.d);
1114
    return farg.ll;
1115
}
1116

    
1117
/* fneg */
1118
uint64_t helper_fneg (uint64_t arg)
1119
{
1120
    CPU_DoubleU farg;
1121

    
1122
    farg.ll = arg;
1123
    farg.d = float64_chs(farg.d);
1124
    return farg.ll;
1125
}
1126

    
1127
/* fctiw - fctiw.
 * Convert double to 32-bit signed integer using the current rounding
 * mode.  NaN/infinity inputs raise VXCVI (plus VXSNAN for sNaN). */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}
1148

    
1149
/* fctiwz - fctiwz.
 * Convert double to 32-bit signed integer, always rounding toward zero.
 * NaN/infinity inputs raise VXCVI (plus VXSNAN for sNaN). */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}
1170

    
1171
#if defined(TARGET_PPC64)
1172
/* fcfid - fcfid.
 * Convert a 64-bit signed integer to double precision. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}
1179

    
1180
/* fctid - fctid.
 * Convert double to 64-bit signed integer using the current rounding
 * mode.  NaN/infinity inputs raise VXCVI (plus VXSNAN for sNaN). */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1197

    
1198
/* fctidz - fctidz.
 * Convert double to 64-bit signed integer, always rounding toward zero.
 * NaN/infinity inputs raise VXCVI (plus VXSNAN for sNaN). */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1215

    
1216
#endif
1217

    
1218
/* Common body of the frin/friz/frip/frim round-to-integer helpers:
 * round ARG to an integral double value with ROUNDING_MODE, temporarily
 * overriding (and then restoring) the FPSCR rounding mode.
 * NaN/infinity inputs raise VXCVI (plus VXSNAN for sNaN). */
static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}
1237

    
1238
/* frin: round to integral, ties to even. */
uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}
1242

    
1243
/* friz: round to integral, toward zero. */
uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}
1247

    
1248
/* frip: round to integral, toward +infinity. */
uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}
1252

    
1253
/* frim: round to integral, toward -infinity. */
uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
1257

    
1258
/* fmadd - fmadd.
 * Fused multiply-add (arg1 * arg2 + arg3).  The product is formed in
 * 128-bit precision so only one final rounding occurs, as the
 * architecture requires.  0*inf raises VXIMZ, inf + (-inf) after the
 * multiply raises VXISI, sNaN operands raise VXSNAN. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }

    return farg1.ll;
}
1297

    
1298
/* fmsub - fmsub.
 * Fused multiply-subtract (arg1 * arg2 - arg3), product formed in
 * 128-bit precision for a single final rounding.  0*inf raises VXIMZ,
 * inf - inf (like signs) raises VXISI, sNaN operands raise VXSNAN. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }
    return farg1.ll;
}
1336

    
1337
/* fnmadd - fnmadd.
 * Negated fused multiply-add: -(arg1 * arg2 + arg3).  Same exception
 * behaviour as fmadd; the final negation is skipped for NaN results so
 * the NaN payload/sign is preserved. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}
1378

    
1379
/* fnmsub - fnmsub.
 * Negated fused multiply-subtract: -(arg1 * arg2 - arg3).  Same
 * exception behaviour as fmsub; the final negation is skipped for NaN
 * results so the NaN payload/sign is preserved. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}
1420

    
1421
/* frsp - frsp.
 * Round a double to single precision via a double->single->double
 * round trip; sNaN input raises VXSNAN. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN rounding to single precision */
       fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
1437

    
1438
/* fsqrt - fsqrt.
 * Double-precision square root.  A negative nonzero input raises
 * VXSQRT; sNaN input raises VXSNAN. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN square root */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1456

    
1457
/* fre - fre. */
1458
uint64_t helper_fre (uint64_t arg)
1459
{
1460
    CPU_DoubleU farg;
1461
    farg.ll = arg;
1462

    
1463
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1464
        /* sNaN reciprocal */
1465
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1466
    }
1467
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1468
    return farg.d;
1469
}
1470

    
1471
/* fres - fres.
 * Single-precision reciprocal estimate: exact 1.0/x in double, then
 * rounded to single precision.  sNaN input raises VXSNAN. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    /* Round the result to single precision */
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
1488

    
1489
/* frsqrte  - frsqrte.
 * Reciprocal square root estimate: exact 1/sqrt(x) in double, rounded
 * to single precision.  Negative nonzero input raises VXSQRT; sNaN
 * input raises VXSNAN. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN reciprocal square root */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        /* Round the result to single precision */
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
1511

    
1512
/* fsel - fsel. */
1513
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1514
{
1515
    CPU_DoubleU farg1;
1516

    
1517
    farg1.ll = arg1;
1518

    
1519
    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_any_nan(farg1.d)) {
1520
        return arg2;
1521
    } else {
1522
        return arg3;
1523
    }
1524
}
1525

    
1526
/* fcmpu: unordered floating compare.  Sets CR[crfD] and FPSCR[FPRF] to
 * one of 0x08 (lt), 0x04 (gt), 0x02 (eq), 0x01 (unordered).  Only an
 * sNaN operand raises an invalid-operation exception (VXSNAN). */
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}
1554

    
1555
/* fcmpo: ordered floating compare.  Same CR/FPRF encoding as fcmpu, but
 * any NaN operand is an invalid operation: sNaN raises VXSNAN|VXVC,
 * qNaN raises VXVC. */
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
1588

    
1589
#if !defined (CONFIG_USER_ONLY)
1590
/* mtmsr: write MSR through hreg_store_msr().  A non-zero return is an
 * exception number to raise (e.g. power-saving state entry); in that
 * case force a TB exit first so the new machine state takes effect. */
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}
1598

    
1599
/* Common return-from-interrupt body: restore NIP and MSR (masked by
 * MSRM).  On 64-bit targets, when the restored MSR has SF clear, the
 * values are truncated to 32 bits; KEEP_MSRH additionally preserves the
 * current high MSR word (used by the 32-bit rfi on 64-bit CPUs). */
static inline void do_rfi(target_ulong nip, target_ulong msr,
                          target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
1627

    
1628
/* rfi: return from interrupt, restoring NIP/MSR from SRR0/SRR1
 * (high MSR word preserved on 64-bit CPUs). */
void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 1);
}
1633

    
1634
#if defined(TARGET_PPC64)
1635
/* rfid: 64-bit return from interrupt, restoring NIP/MSR from SRR0/SRR1. */
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 0);
}
1640

    
1641
/* hrfid: return from hypervisor interrupt, using HSRR0/HSRR1. */
void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0x783F0000), 0);
}
1646
#endif
1647
#endif
1648

    
1649
/* tw: 32-bit conditional trap.  Raise a TRAP program exception when any
 * comparison enabled by the TO field (flags) holds between arg1 and arg2:
 * 0x10 lt, 0x08 gt, 0x04 eq (signed); 0x02 ltu, 0x01 gtu (unsigned). */
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    int trap = ((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
               ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
               ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
               ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
               ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01));

    if (unlikely(trap)) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1659

    
1660
#if defined(TARGET_PPC64)
1661
/* td: 64-bit conditional trap, same TO-field encoding as helper_tw. */
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    int trap = ((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
               ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
               ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
               ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
               ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01));

    if (unlikely(trap)) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1670
#endif
1671

    
1672
/*****************************************************************************/
1673
/* PowerPC 601 specific instructions (POWER bridge) */
1674

    
1675
/* POWER clcs: return a cache characteristic selected by ARG.
 * Only the line-size selectors are implemented; anything else is
 * architecturally undefined and returns 0. */
target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
    default:
        /* Undefined selector */
        return 0;
    }
}
1702

    
1703
/* POWER div: divide the 64-bit value (arg1 || MQ) by arg2; quotient is
 * the result, remainder goes to MQ.  Overflow and divide-by-zero return
 * INT32_MIN with MQ cleared. */
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        /* NOTE(review): the remainder uses the full-width arg2 rather
         * than (int32_t)arg2, unlike the quotient — confirm this is the
         * intended 601 semantics. */
        env->spr[SPR_MQ] = tmp % arg2;
        return  tmp / (int32_t)arg2;
    }
}
1716

    
1717
/* POWER divo: as helper_div, but also maintains XER[OV]/XER[SO] —
 * set on overflow or divide-by-zero, or when the quotient does not fit
 * in 32 bits; OV is cleared otherwise. */
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        /* NOTE(review): remainder uses full-width arg2, quotient uses
         * (int32_t)arg2 — confirm intended (see helper_div). */
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            /* Quotient does not fit in 32 bits */
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}
1737

    
1738
/* POWER divs: 32-bit signed divide, remainder to MQ.  Overflow and
 * divide-by-zero return INT32_MIN with MQ cleared. */
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    int32_t dividend = (int32_t)arg1;
    int32_t divisor = (int32_t)arg2;

    if (divisor == 0 || (dividend == INT32_MIN && divisor == -1)) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->spr[SPR_MQ] = dividend % divisor;
    return dividend / divisor;
}
1749

    
1750
/* POWER divso: as helper_divs, but sets XER[OV]/XER[SO] on overflow or
 * divide-by-zero and clears OV otherwise. */
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    int32_t dividend = (int32_t)arg1;
    int32_t divisor = (int32_t)arg2;

    if (divisor == 0 || (dividend == INT32_MIN && divisor == -1)) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->xer &= ~(1 << XER_OV);
    env->spr[SPR_MQ] = dividend % divisor;
    return dividend / divisor;
}
1763

    
1764
#if !defined (CONFIG_USER_ONLY)
1765
/* POWER rac: translate ADDR to a real address, ignoring BATs (the BAT
 * count is temporarily forced to 0 around the lookup).  Returns 0 when
 * translation fails. */
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}
1782

    
1783
/* POWER rfsvc: return from service call — NIP from LR, MSR from CTR. */
void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
1787
#endif
1788

    
1789
/*****************************************************************************/
1790
/* 602 specific instructions */
1791
/* mfrom is the most crazy instruction ever seen, imho ! */
1792
/* Real implementation uses a ROM table. Do the same */
1793
/* Extremly decomposed:
1794
 *                      -arg / 256
1795
 * return 256 * log10(10           + 1.0) + 0.5
1796
 */
1797
#if !defined (CONFIG_USER_ONLY)
1798
/* 602 mfrom: table lookup emulating 256 * log10(10^(-arg/256) + 1.0) + 0.5,
 * as a real implementation would do via ROM; out-of-range args yield 0. */
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
1807
#endif
1808

    
1809
/*****************************************************************************/
1810
/* Embedded PowerPC specific helpers */
1811

    
1812
/* XXX: to be improved to check access rights when in user-mode */
/* Read a Device Control Register; raises an invalid-instruction program
 * exception when no DCR environment exists, and a privileged-register
 * program exception when the DCR read itself fails. */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    uint32_t val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}
1828

    
1829
/* Write a Device Control Register; exception behaviour mirrors
 * helper_load_dcr. */
void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
1841

    
1842
#if !defined(CONFIG_USER_ONLY)
1843
/* PowerPC 40x: return from critical interrupt using SRR2/SRR3. */
void helper_40x_rfci (void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}
1848

    
1849
void helper_rfci (void)
1850
{
1851
    do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1852
           ~((target_ulong)0x3FFF0000), 0);
1853
}
1854

    
1855
void helper_rfdi (void)
1856
{
1857
    do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1858
           ~((target_ulong)0x3FFF0000), 0);
1859
}
1860

    
1861
void helper_rfmci (void)
1862
{
1863
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1864
           ~((target_ulong)0x3FFF0000), 0);
1865
}
1866
#endif
1867

    
1868
/* 440 specific */
1869
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
1870
{
1871
    target_ulong mask;
1872
    int i;
1873

    
1874
    i = 1;
1875
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1876
        if ((high & mask) == 0) {
1877
            if (update_Rc) {
1878
                env->crf[0] = 0x4;
1879
            }
1880
            goto done;
1881
        }
1882
        i++;
1883
    }
1884
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1885
        if ((low & mask) == 0) {
1886
            if (update_Rc) {
1887
                env->crf[0] = 0x8;
1888
            }
1889
            goto done;
1890
        }
1891
        i++;
1892
    }
1893
    if (update_Rc) {
1894
        env->crf[0] = 0x2;
1895
    }
1896
 done:
1897
    env->xer = (env->xer & ~0x7F) | i;
1898
    if (update_Rc) {
1899
        env->crf[0] |= xer_so;
1900
    }
1901
    return i;
1902
}
1903

    
1904
/*****************************************************************************/
/* Altivec extension helpers */
/* Index of the most/least significant member of a two-element array that
 * mirrors a 128-bit value in host byte order. */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/* Iterate over the vector elements in PowerPC (big-endian) element order,
 * whatever the host byte order is.  'r' must be the result vector in
 * scope at the expansion site. */
#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                \
    if (float32_is_any_nan(x)) {                                \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

/* Chained NaN screens for one, two or three float operands; the guarded
 * block runs only when none of the operands is a NaN. */
#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
1938

    
1939
/* Saturating arithmetic helpers.  */
/* SATCVT generates a narrowing conversion cvt<from><to>() that clamps the
 * wider 'from_type' value into [min, max] of 'to_type' and sets *sat when
 * clamping occurred.  SATCVTU is the variant for unsigned sources, where
 * the lower-bound check would always be false. */
#define SATCVT(from, to, from_type, to_type, min, max)                  \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x < (from_type)min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (x > (from_type)max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
#define SATCVTU(from, to, from_type, to_type, min, max)                 \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x > (from_type)max) {                                       \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
/* signed -> signed narrowing */
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

/* unsigned -> unsigned narrowing */
SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
/* signed -> unsigned narrowing (negative inputs clamp to 0) */
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU
1979

    
1980
#define LVE(name, access, swap, element)                        \
1981
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
1982
    {                                                           \
1983
        size_t n_elems = ARRAY_SIZE(r->element);                \
1984
        int adjust = HI_IDX*(n_elems-1);                        \
1985
        int sh = sizeof(r->element[0]) >> 1;                    \
1986
        int index = (addr & 0xf) >> sh;                         \
1987
        if(msr_le) {                                            \
1988
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
1989
        } else {                                                        \
1990
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
1991
        }                                                               \
1992
    }
1993
#define I(x) (x)
1994
LVE(lvebx, ldub, I, u8)
1995
LVE(lvehx, lduw, bswap16, u16)
1996
LVE(lvewx, ldl, bswap32, u32)
1997
#undef I
1998
#undef LVE
1999

    
2000
/* lvsl: build the permute control vector for left alignment — the bytes
 * (sh & 0xf), (sh & 0xf)+1, ... in PowerPC element order. */
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int next = sh & 0xf;

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = next;
        next++;
    }
}
2008

    
2009
/* lvsr: build the permute control vector for right alignment — the bytes
 * 16-(sh & 0xf), 17-(sh & 0xf), ... in PowerPC element order. */
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int next = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = next;
        next++;
    }
}
2017

    
2018
#define STVE(name, access, swap, element)                       \
2019
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
2020
    {                                                           \
2021
        size_t n_elems = ARRAY_SIZE(r->element);                \
2022
        int adjust = HI_IDX*(n_elems-1);                        \
2023
        int sh = sizeof(r->element[0]) >> 1;                    \
2024
        int index = (addr & 0xf) >> sh;                         \
2025
        if(msr_le) {                                            \
2026
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2027
        } else {                                                        \
2028
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2029
        }                                                               \
2030
    }
2031
#define I(x) (x)
2032
STVE(stvebx, stb, I, u8)
2033
STVE(stvehx, stw, bswap16, u16)
2034
STVE(stvewx, stl, bswap32, u32)
2035
#undef I
2036
#undef LVE
2037

    
2038
/* mtvscr: move the low word of the source vector (word 3 in PowerPC
 * order) into VSCR and propagate the non-Java bit to the softfloat
 * flush-to-zero setting. */
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    uint32_t new_vscr = r->u32[3];
#else
    uint32_t new_vscr = r->u32[0];
#endif
    env->vscr = new_vscr;
    set_flush_to_zero(vscr_nj, &env->vec_status);
}
2047

    
2048
/* vaddcuw: per-word carry-out of the unsigned add.  The carry is 1 exactly
 * when a + b wraps modulo 2^32, i.e. when the truncated sum is smaller
 * than either operand (equivalent to the ~a < b test). */
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint32_t sum = a->u32[i] + b->u32[i];
        r->u32[i] = sum < a->u32[i];
    }
}
2055

    
2056
#define VARITH_DO(name, op, element)        \
2057
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
2058
{                                                                       \
2059
    int i;                                                              \
2060
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
2061
        r->element[i] = a->element[i] op b->element[i];                 \
2062
    }                                                                   \
2063
}
2064
#define VARITH(suffix, element)                  \
2065
  VARITH_DO(add##suffix, +, element)             \
2066
  VARITH_DO(sub##suffix, -, element)
2067
VARITH(ubm, u8)
2068
VARITH(uhm, u16)
2069
VARITH(uwm, u32)
2070
#undef VARITH_DO
2071
#undef VARITH
2072

    
2073
#define VARITHFP(suffix, func)                                          \
2074
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
2075
    {                                                                   \
2076
        int i;                                                          \
2077
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
2078
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
2079
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
2080
            }                                                           \
2081
        }                                                               \
2082
    }
2083
VARITHFP(addfp, float32_add)
2084
VARITHFP(subfp, float32_sub)
2085
#undef VARITHFP
2086

    
2087
#define VARITHSAT_CASE(type, op, cvt, element)                          \
2088
    {                                                                   \
2089
        type result = (type)a->element[i] op (type)b->element[i];       \
2090
        r->element[i] = cvt(result, &sat);                              \
2091
    }
2092

    
2093
#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
2094
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2095
    {                                                                   \
2096
        int sat = 0;                                                    \
2097
        int i;                                                          \
2098
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2099
            switch (sizeof(r->element[0])) {                            \
2100
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
2101
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
2102
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
2103
            }                                                           \
2104
        }                                                               \
2105
        if (sat) {                                                      \
2106
            env->vscr |= (1 << VSCR_SAT);                               \
2107
        }                                                               \
2108
    }
2109
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)        \
2110
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)    \
2111
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
2112
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)       \
2113
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)     \
2114
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
2115
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
2116
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
2117
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
2118
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
2119
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
2120
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
2121
#undef VARITHSAT_CASE
2122
#undef VARITHSAT_DO
2123
#undef VARITHSAT_SIGNED
2124
#undef VARITHSAT_UNSIGNED
2125

    
2126
#define VAVG_DO(name, element, etype)                                   \
2127
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2128
    {                                                                   \
2129
        int i;                                                          \
2130
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2131
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
2132
            r->element[i] = x >> 1;                                     \
2133
        }                                                               \
2134
    }
2135

    
2136
#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2137
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
2138
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2139
VAVG(b, s8, int16_t, u8, uint16_t)
2140
VAVG(h, s16, int32_t, u16, uint32_t)
2141
VAVG(w, s32, int64_t, u32, uint64_t)
2142
#undef VAVG_DO
2143
#undef VAVG
2144

    
2145
#define VCF(suffix, cvt, element)                                       \
2146
    void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
2147
    {                                                                   \
2148
        int i;                                                          \
2149
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
2150
            float32 t = cvt(b->element[i], &env->vec_status);           \
2151
            r->f[i] = float32_scalbn (t, -uim, &env->vec_status);       \
2152
        }                                                               \
2153
    }
2154
VCF(ux, uint32_to_float32, u32)
2155
VCF(sx, int32_to_float32, s32)
2156
#undef VCF
2157

    
2158
#define VCMP_DO(suffix, compare, element, record)                       \
2159
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2160
    {                                                                   \
2161
        uint32_t ones = (uint32_t)-1;                                   \
2162
        uint32_t all = ones;                                            \
2163
        uint32_t none = 0;                                              \
2164
        int i;                                                          \
2165
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2166
            uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
2167
            switch (sizeof (a->element[0])) {                           \
2168
            case 4: r->u32[i] = result; break;                          \
2169
            case 2: r->u16[i] = result; break;                          \
2170
            case 1: r->u8[i] = result; break;                           \
2171
            }                                                           \
2172
            all &= result;                                              \
2173
            none |= result;                                             \
2174
        }                                                               \
2175
        if (record) {                                                   \
2176
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
2177
        }                                                               \
2178
    }
2179
#define VCMP(suffix, compare, element)          \
2180
    VCMP_DO(suffix, compare, element, 0)        \
2181
    VCMP_DO(suffix##_dot, compare, element, 1)
2182
VCMP(equb, ==, u8)
2183
VCMP(equh, ==, u16)
2184
VCMP(equw, ==, u32)
2185
VCMP(gtub, >, u8)
2186
VCMP(gtuh, >, u16)
2187
VCMP(gtuw, >, u32)
2188
VCMP(gtsb, >, s8)
2189
VCMP(gtsh, >, s16)
2190
VCMP(gtsw, >, s32)
2191
#undef VCMP_DO
2192
#undef VCMP
2193

    
2194
#define VCMPFP_DO(suffix, compare, order, record)                       \
2195
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2196
    {                                                                   \
2197
        uint32_t ones = (uint32_t)-1;                                   \
2198
        uint32_t all = ones;                                            \
2199
        uint32_t none = 0;                                              \
2200
        int i;                                                          \
2201
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
2202
            uint32_t result;                                            \
2203
            int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
2204
            if (rel == float_relation_unordered) {                      \
2205
                result = 0;                                             \
2206
            } else if (rel compare order) {                             \
2207
                result = ones;                                          \
2208
            } else {                                                    \
2209
                result = 0;                                             \
2210
            }                                                           \
2211
            r->u32[i] = result;                                         \
2212
            all &= result;                                              \
2213
            none |= result;                                             \
2214
        }                                                               \
2215
        if (record) {                                                   \
2216
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
2217
        }                                                               \
2218
    }
2219
#define VCMPFP(suffix, compare, order)           \
2220
    VCMPFP_DO(suffix, compare, order, 0)         \
2221
    VCMPFP_DO(suffix##_dot, compare, order, 1)
2222
VCMPFP(eqfp, ==, float_relation_equal)
2223
VCMPFP(gefp, !=, float_relation_less)
2224
VCMPFP(gtfp, ==, float_relation_greater)
2225
#undef VCMPFP_DO
2226
#undef VCMPFP
2227

    
2228
/* vcmpbfp[.]: bounds-compare each float element of 'a' against [-b, b].
 * Result bit 31 is set when a > b, bit 30 when a < -b; a NaN operand sets
 * both.  With 'record', CR6 bit 1 reports that every element was within
 * bounds. */
static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                                    int record)
{
    int i;
    int all_in = 0;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here.  */
        } else {
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;
            /* Bug fix: perform the shifts in unsigned arithmetic —
             * '(!le) << 31' on a plain int shifts into the sign bit,
             * which is undefined behavior in C. */
            r->u32[i] = ((uint32_t)(!le) << 31) | ((uint32_t)(!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}

void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 0);
}

void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 1);
}
2261

    
2262
#define VCT(suffix, satcvt, element)                                    \
2263
    void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
2264
    {                                                                   \
2265
        int i;                                                          \
2266
        int sat = 0;                                                    \
2267
        float_status s = env->vec_status;                               \
2268
        set_float_rounding_mode(float_round_to_zero, &s);               \
2269
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
2270
            if (float32_is_any_nan(b->f[i])) {                          \
2271
                r->element[i] = 0;                                      \
2272
            } else {                                                    \
2273
                float64 t = float32_to_float64(b->f[i], &s);            \
2274
                int64_t j;                                              \
2275
                t = float64_scalbn(t, uim, &s);                         \
2276
                j = float64_to_int64(t, &s);                            \
2277
                r->element[i] = satcvt(j, &sat);                        \
2278
            }                                                           \
2279
        }                                                               \
2280
        if (sat) {                                                      \
2281
            env->vscr |= (1 << VSCR_SAT);                               \
2282
        }                                                               \
2283
    }
2284
VCT(uxs, cvtsduw, u32)
2285
VCT(sxs, cvtsdsw, s32)
2286
#undef VCT
2287

    
2288
/* vmaddfp: element-wise (a * c) + b.  NaN operands are quieted and
 * forwarded by HANDLE_NAN3. */
void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Widen to double precision so the fused sequence is rounded
             * only once, at the final conversion back to float32. */
            float64 wa = float32_to_float64(a->f[i], &env->vec_status);
            float64 wb = float32_to_float64(b->f[i], &env->vec_status);
            float64 wc = float32_to_float64(c->f[i], &env->vec_status);
            float64 acc = float64_mul(wa, wc, &env->vec_status);
            acc = float64_add(acc, wb, &env->vec_status);
            r->f[i] = float64_to_float32(acc, &env->vec_status);
        }
    }
}
2305

    
2306
/* vmhaddshs: ((a * b) >> 15) + c per halfword, saturated to signed 16-bit;
 * saturation is recorded sticky in VSCR[SAT]. */
void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t sum = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (sum, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2321

    
2322
/* vmhraddshs: like vmhaddshs but with rounding — 0x4000 is added to the
 * product before the >> 15 shift. */
void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t sum = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (sum, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2337

    
2338
#define VMINMAX_DO(name, compare, element)                              \
2339
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2340
    {                                                                   \
2341
        int i;                                                          \
2342
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2343
            if (a->element[i] compare b->element[i]) {                  \
2344
                r->element[i] = b->element[i];                          \
2345
            } else {                                                    \
2346
                r->element[i] = a->element[i];                          \
2347
            }                                                           \
2348
        }                                                               \
2349
    }
2350
#define VMINMAX(suffix, element)                \
2351
  VMINMAX_DO(min##suffix, >, element)           \
2352
  VMINMAX_DO(max##suffix, <, element)
2353
VMINMAX(sb, s8)
2354
VMINMAX(sh, s16)
2355
VMINMAX(sw, s32)
2356
VMINMAX(ub, u8)
2357
VMINMAX(uh, u16)
2358
VMINMAX(uw, u32)
2359
#undef VMINMAX_DO
2360
#undef VMINMAX
2361

    
2362
#define VMINMAXFP(suffix, rT, rF)                                       \
2363
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
2364
    {                                                                   \
2365
        int i;                                                          \
2366
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
2367
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
2368
                if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
2369
                    r->f[i] = rT->f[i];                                 \
2370
                } else {                                                \
2371
                    r->f[i] = rF->f[i];                                 \
2372
                }                                                       \
2373
            }                                                           \
2374
        }                                                               \
2375
    }
2376
VMINMAXFP(minfp, a, b)
2377
VMINMAXFP(maxfp, b, a)
2378
#undef VMINMAXFP
2379

    
2380
/* vmladduhm: multiply-low-and-add, modulo — only the low 16 bits of
 * (a * b) + c are kept per halfword. */
void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t sum = a->s16[i] * b->s16[i] + c->s16[i];
        r->s16[i] = (int16_t)sum;
    }
}
2388

    
2389
#define VMRG_DO(name, element, highp)                                   \
2390
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2391
    {                                                                   \
2392
        ppc_avr_t result;                                               \
2393
        int i;                                                          \
2394
        size_t n_elems = ARRAY_SIZE(r->element);                        \
2395
        for (i = 0; i < n_elems/2; i++) {                               \
2396
            if (highp) {                                                \
2397
                result.element[i*2+HI_IDX] = a->element[i];             \
2398
                result.element[i*2+LO_IDX] = b->element[i];             \
2399
            } else {                                                    \
2400
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2401
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2402
            }                                                           \
2403
        }                                                               \
2404
        *r = result;                                                    \
2405
    }
2406
#if defined(HOST_WORDS_BIGENDIAN)
2407
#define MRGHI 0
2408
#define MRGLO 1
2409
#else
2410
#define MRGHI 1
2411
#define MRGLO 0
2412
#endif
2413
#define VMRG(suffix, element)                   \
2414
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
2415
  VMRG_DO(mrgh##suffix, element, MRGLO)
2416
VMRG(b, u8)
2417
VMRG(h, u16)
2418
VMRG(w, u32)
2419
#undef VMRG_DO
2420
#undef VMRG
2421
#undef MRGHI
2422
#undef MRGLO
2423

    
2424
/* vmsummbm: mixed-sign multiply-sum — signed bytes of 'a' times unsigned
 * bytes of 'b', with each group of four products accumulated into the
 * matching word of 'c'. */
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;
    int j;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        int32_t acc = c->s32[i];
        for (j = 0; j < 4; j++) {
            acc += prod[4*i + j];
        }
        r->s32[i] = acc;
    }
}
2437

    
2438
/* vmsumshm: signed halfword multiply-sum, modulo — each pair of products
 * is accumulated into the matching word of 'c'. */
void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        int32_t acc = c->s32[i] + prod[2*i];
        r->s32[i] = acc + prod[2*i+1];
    }
}
2451

    
2452
/* vmsumshs: signed halfword multiply-sum with saturation — the pairwise
 * sums are accumulated in 64 bits, then clamped to signed 32-bit with
 * VSCR[SAT] set sticky on overflow. */
void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        int64_t acc = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtsdsw(acc, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2471

    
2472
/* vmsumubm: unsigned byte multiply-sum, modulo — each group of four byte
 * products is accumulated into the matching word of 'c'. */
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;
    int j;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        uint32_t acc = c->u32[i];
        for (j = 0; j < 4; j++) {
            acc += prod[4*i + j];
        }
        r->u32[i] = acc;
    }
}
2485

    
2486
/* vmsumuhm: unsigned halfword multiply-sum, modulo — each pair of
 * products is accumulated into the matching word of 'c'. */
void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        uint32_t acc = c->u32[i] + prod[2*i];
        r->u32[i] = acc + prod[2*i+1];
    }
}
2499

    
2500
/* vmsumuhs: unsigned halfword multiply-sum with saturation — pairwise
 * sums accumulate in 64 bits, then clamp to unsigned 32-bit with
 * VSCR[SAT] set sticky on overflow. */
void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        uint64_t acc = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtuduw(acc, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2519

    
2520
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
2521
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2522
    {                                                                   \
2523
        int i;                                                          \
2524
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
2525
            if (evenp) {                                                \
2526
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2527
            } else {                                                    \
2528
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2529
            }                                                           \
2530
        }                                                               \
2531
    }
2532
#define VMUL(suffix, mul_element, prod_element) \
2533
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2534
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2535
VMUL(sb, s8, s16)
2536
VMUL(sh, s16, s32)
2537
VMUL(ub, u8, u16)
2538
VMUL(uh, u16, u32)
2539
#undef VMUL_DO
2540
#undef VMUL
2541

    
2542
/* vnmsubfp: negative multiply-subtract, r[i] = -(a[i]*c[i] - b[i]),
 * element-wise on single-precision floats. */
void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end.  */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_sub(t, bf, &env->vec_status);
            t = float64_chs(t);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
2560

    
2561
/* vperm: each byte of c selects one byte from the 32-byte concatenation
 * a:b (bit 0x10 chooses b, low 4 bits index within that register).
 * A temporary is used so r may alias a, b or c. */
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;
    VECTOR_FOR_INORDER_I (i, u8) {
        int s = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int index = s & 0xf;
#else
        /* Little-endian host stores vector bytes reversed. */
        int index = 15 - (s & 0xf);
#endif
        if (s & 0x10) {
            result.u8[i] = b->u8[index];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
    *r = result;
}
2580

    
2581
/* PKBIG: 1 when host vector element order matches PPC (big-endian) order. */
#if defined(HOST_WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
/* vpkpx: pack eight 32-bit pixels from a and b into eight 16-bit 1/5/5/5
 * pixels, keeping the high bits of each channel. */
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(HOST_WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I (i, u64) {
        VECTOR_FOR_INORDER_I (j, u32){
            uint32_t e = x[i]->u32[j];
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}
2606

    
2607
/* vpk*: pack two source registers of `from`-type elements into one register
 * of narrower `to`-type elements, converting each with cvt().  dosat != 0
 * means cvt saturates and VSCR[SAT] must be raised on overflow; the I()
 * identity macro is used for the modulo (truncating) variants. */
#define VPK(suffix, from, to, cvt, dosat)       \
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
        VECTOR_FOR_INORDER_I (i, from) {                                \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG
2636

    
2637
/* vrefp: element-wise single-precision reciprocal estimate
 * (implemented here as an exact 1/x division). */
void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
        }
    }
}
2646

    
2647
/* vrfi{n,m,p,z}: element-wise round-to-integral with a fixed rounding
 * mode.  A local copy of the float status is used so the CPU's rounding
 * mode is not clobbered. */
#define VRFI(suffix, rounding)                                          \
    void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(rounding, &s);                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN1(r->f[i], b->f[i]) {                             \
                r->f[i] = float32_round_to_int (b->f[i], &s);           \
            }                                                           \
        }                                                               \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI
2664

    
2665
#define VROTATE(suffix, element)                                        \
2666
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2667
    {                                                                   \
2668
        int i;                                                          \
2669
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2670
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2671
            unsigned int shift = b->element[i] & mask;                  \
2672
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2673
        }                                                               \
2674
    }
2675
VROTATE(b, u8)
2676
VROTATE(h, u16)
2677
VROTATE(w, u32)
2678
#undef VROTATE
2679

    
2680
/* vrsqrtefp: element-wise reciprocal square-root estimate
 * (implemented exactly as 1/sqrt(x)). */
void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            float32 t = float32_sqrt(b->f[i], &env->vec_status);
            r->f[i] = float32_div(float32_one, t, &env->vec_status);
        }
    }
}
2690

    
2691
/* vsel: bitwise select -- where a bit of c is 1 take the bit from b,
 * otherwise take it from a. */
void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int half;
    for (half = 0; half < 2; half++) {
        uint64_t sel = c->u64[half];
        r->u64[half] = (b->u64[half] & sel) | (a->u64[half] & ~sel);
    }
}
2696

    
2697
/* vexptefp: element-wise 2^x estimate (computed exactly here). */
void helper_vexptefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_exp2(b->f[i], &env->vec_status);
        }
    }
}

/* vlogefp: element-wise log2(x) estimate (computed exactly here). */
void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_log2(b->f[i], &env->vec_status);
        }
    }
}
}
2716

    
2717
#if defined(HOST_WORDS_BIGENDIAN)
#define LEFT 0
#define RIGHT 1
#else
#define LEFT 1
#define RIGHT 0
#endif
/* The specification says that the results are undefined if all of the
 * shift counts are not identical.  We check to make sure that they are
 * to conform to what real hardware appears to do.  */
/* vsl/vsr: shift the whole 128-bit register left or right by 0-7 bits,
 * propagating the carry between the two 64-bit halves.  If the per-byte
 * shift counts in b disagree, r is left unchanged (result undefined per
 * the architecture). */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
        int doit = 1;                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                *r = *a;                                                \
            } else if (leftp) {                                         \
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
            }                                                           \
        }                                                               \
    }
VSHIFT(l, LEFT)
VSHIFT(r, RIGHT)
#undef VSHIFT
#undef LEFT
#undef RIGHT
2755

    
2756
/* vsl{b,h,w}: shift each element of a left by the low log2(width) bits
 * of the corresponding element of b (shifted-out bits are lost). */
#define VSL(suffix, element)                                            \
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL
2770

    
2771
/* vsldoi: take 16 consecutive bytes of the 32-byte concatenation a:b,
 * starting at byte offset sh (0-15).  A temporary allows r to alias
 * a or b. */
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

#if defined(HOST_WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index-0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    /* Host stores vector bytes reversed, so the window is taken from
     * the other end and the roles of a and b swap. */
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index-0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    *r = result;
}
2798

    
2799
/* vslo: shift the whole register left by 0-15 whole octets; the count
 * comes from bits 3-6 of the low-order byte of b. */
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#else
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#endif
}
2811

    
2812
/* Experimental testing shows that hardware masks the immediate.  */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(HOST_WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
/* Reverse the index on little-endian hosts. */
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
#endif
/* vsplt{b,h,w}: replicate the element of b selected by the (masked)
 * immediate into every element of r. */
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED
2834

    
2835
/* vspltis{b,h,w}: splat a 5-bit signed immediate into every element.
 * The (int8_t) cast plus shifts sign-extend the 5-bit value. */
#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI
2848

    
2849
/* vsr{b,h,w} / vsra{b,h,w}: shift each element right by the low
 * log2(width) bits of the matching element of b.  The signed (s8/s16/s32)
 * instantiations give the arithmetic-shift "vsra" variants. */
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR
2866

    
2867
/* vsro: shift the whole register right by 0-15 whole octets; the count
 * comes from bits 3-6 of the low-order byte of b. */
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#else
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#endif
}
2879

    
2880
/* vsubcuw: per-word carry-out of a - b, i.e. 1 when the unsigned
 * subtraction does not borrow (a >= b), else 0. */
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int word;
    for (word = 0; word < ARRAY_SIZE(r->u32); word++) {
        r->u32[word] = (a->u32[word] >= b->u32[word]) ? 1 : 0;
    }
}
2887

    
2888
/* vsumsws: sum all four signed words of a plus the last word of b,
 * saturate into the last word of r; the other words are cleared.
 * "upper" is the host index of the architecturally-last word. */
void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2912

    
2913
/* vsum2sws: for each doubleword, sum the pair of signed words of a plus
 * the corresponding odd word of b, saturating into that word of r. */
void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper+i*2];
        result.u64[i] = 0;
        /* ARRAY_SIZE(r->u64) == 2 == number of s32 words per pair,
         * so this sums exactly the two words of the pair. */
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2*i+j];
        }
        result.s32[upper+i*2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2938

    
2939
/* vsum4sbs: for each word, add the four signed bytes of a in that word
 * to the word of b, saturating to a signed word. */
void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        /* ARRAY_SIZE(r->s32) == 4 == bytes per word. */
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4*i+j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

/* vsum4shs: for each word, add the two signed halfwords of a in that
 * word to the word of b, saturating to a signed word. */
void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        t += a->s16[2*i] + a->s16[2*i+1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

/* vsum4ubs: unsigned variant of vsum4sbs, saturating to an unsigned
 * word. */
void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4*i+j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2990

    
2991
#if defined(HOST_WORDS_BIGENDIAN)
2992
#define UPKHI 1
2993
#define UPKLO 0
2994
#else
2995
#define UPKHI 0
2996
#define UPKLO 1
2997
#endif
2998
#define VUPKPX(suffix, hi)                                      \
2999
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
3000
    {                                                           \
3001
        int i;                                                  \
3002
        ppc_avr_t result;                                       \
3003
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
3004
            uint16_t e = b->u16[hi ? i : i+4];                  \
3005
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
3006
            uint8_t r = (e >> 10) & 0x1f;                       \
3007
            uint8_t g = (e >> 5) & 0x1f;                        \
3008
            uint8_t b = e & 0x1f;                               \
3009
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
3010
        }                                                               \
3011
        *r = result;                                                    \
3012
    }
3013
VUPKPX(lpx, UPKLO)
3014
VUPKPX(hpx, UPKHI)
3015
#undef VUPKPX
3016

    
3017
/* vupk{h,l}s{b,h}: sign-extend the high (hi != 0) or low half of the
 * packee elements of b into full-width unpacked elements.  The implicit
 * conversion in the assignment performs the sign extension. */
#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO
3040

    
3041
#undef DO_HANDLE_NAN
3042
#undef HANDLE_NAN1
3043
#undef HANDLE_NAN2
3044
#undef HANDLE_NAN3
3045
#undef VECTOR_FOR_INORDER_I
3046
#undef HI_IDX
3047
#undef LO_IDX
3048

    
3049
/*****************************************************************************/
3050
/* SPE extension helpers */
3051
/* Use a table to make this quicker */
3052
/* Read-only table of 4-bit bit-reversals (made const: it is never
 * written). */
static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

/* Reverse the bit order of an 8-bit value. */
static inline uint8_t byte_reverse(uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

/* Reverse the bit order of a 32-bit value.
 * The last term is widened to uint32_t before << 24: byte_reverse()
 * promotes to int, and shifting 0xff into bit 31 of an int is undefined
 * behavior. */
static inline uint32_t word_reverse(uint32_t val)
{
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) | ((uint32_t)byte_reverse(val) << 24);
}
3067

    
3068
/* Number of low-order bits brinc operates on.  This is implementation-
 * dependent on real hardware; 16 is an arbitrary placeholder, to be
 * fixed. */
#define MASKBITS 16
/* brinc: bit-reversed increment of the low MASKBITS bits of arg1 under
 * the mask arg2 (used for bit-reversed addressing, e.g. FFTs). */
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    /* Reverse, add one, reverse back: an increment in bit-reversed
     * order; bits outside b are forced from arg1. */
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}
3079

    
3080
/* cntlsw32: count leading sign bits of a 32-bit word -- leading ones
 * when the value is negative, leading zeros otherwise. */
uint32_t helper_cntlsw32 (uint32_t val)
{
    if (val & 0x80000000)
        return clz32(~val);
    else
        return clz32(val);
}

/* cntlzw32: count leading zeros of a 32-bit word. */
uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}
3092

    
3093
/* Single-precision floating-point conversions */
3094
/* Scalar SPE single-precision conversions.  All work on raw 32-bit bit
 * patterns through CPU_FloatU and use the SPE/vector float status.
 * The float->integer conversions return 0 for NaN inputs, which
 * deliberately differs from IEEE 754 behavior. */

/* efscfsi: signed word -> float. */
static inline uint32_t efscfsi(uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efscfui: unsigned word -> float. */
static inline uint32_t efscfui(uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efsctsi: float -> signed word, current rounding mode. */
static inline int32_t efsctsi(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_int32(u.f, &env->vec_status);
}

/* efsctui: float -> unsigned word, current rounding mode. */
static inline uint32_t efsctui(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_uint32(u.f, &env->vec_status);
}

/* efsctsiz: float -> signed word, round toward zero. */
static inline uint32_t efsctsiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

/* efsctuiz: float -> unsigned word, round toward zero. */
static inline uint32_t efsctuiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

/* efscfsf: signed 32-bit fractional -> float (divide by 2^32). */
static inline uint32_t efscfsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efscfuf: unsigned 32-bit fractional -> float (divide by 2^32). */
static inline uint32_t efscfuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efsctsf: float -> signed fractional (multiply by 2^32, then convert);
 * NaN yields 0. */
static inline uint32_t efsctsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

/* efsctuf: float -> unsigned fractional (multiply by 2^32, then
 * convert); NaN yields 0. */
static inline uint32_t efsctuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
3213

    
3214
/* Expose the scalar efs* conversions above as TCG helpers. */
#define HELPER_SPE_SINGLE_CONV(name)                                          \
uint32_t helper_e##name (uint32_t val)                                        \
{                                                                             \
    return e##name(val);                                                      \
}
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
3239

    
3240
/* Vector (two-lane) SPE conversions: apply the scalar conversion to the
 * high and low 32-bit halves of the 64-bit operand independently. */
#define HELPER_SPE_VECTOR_CONV(name)                                          \
uint64_t helper_ev##name (uint64_t val)                                       \
{                                                                             \
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
            (uint64_t)e##name(val);                                           \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
3266

    
3267
/* Single-precision floating-point arithmetic */
3268
static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
3269
{
3270
    CPU_FloatU u1, u2;
3271
    u1.l = op1;
3272
    u2.l = op2;
3273
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3274
    return u1.l;
3275
}
3276

    
3277
static inline uint32_t efssub(uint32_t op1, uint32_t op2)
3278
{
3279
    CPU_FloatU u1, u2;
3280
    u1.l = op1;
3281
    u2.l = op2;
3282
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3283
    return u1.l;
3284
}
3285

    
3286
static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
3287
{
3288
    CPU_FloatU u1, u2;
3289
    u1.l = op1;
3290
    u2.l = op2;
3291
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3292
    return u1.l;
3293
}
3294

    
3295
static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
3296
{
3297
    CPU_FloatU u1, u2;
3298
    u1.l = op1;
3299
    u2.l = op2;
3300
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3301
    return u1.l;
3302
}
3303

    
3304
/* Expose the scalar efs* arithmetic above as TCG helpers. */
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

/* Vector (two-lane) variants: apply the scalar op to the high and low
 * 32-bit halves independently. */
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
            (uint64_t)e##name(op1, op2);                                      \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
3332

    
3333
/* Single-precision floating-point comparisons */
3334
/* Scalar SPE single-precision comparisons; each returns 4 (true) or 0
 * (false). */
static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

/* NOTE(review): "greater than" is computed as !(op1 <= op2); confirm
 * that the intended result for unordered (NaN) operands is "true"
 * here. */
static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

/* The efstst* "test" variants are architecturally allowed to ignore
 * special values; this implementation simply reuses the full compares. */
static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmplt(op1, op2);
}

static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpgt(op1, op2);
}

static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpeq(op1, op2);
}
3375

    
3376
/* Scalar SPE compare helpers.
 * NOTE(review): the efscmp / efstst functions above already return 4 or
 * 0, so the "<< 2" here produces 16 or 0 -- this looks like double
 * scaling; verify against the translate.c consumer of these helpers
 * before changing. */
#define HELPER_SINGLE_SPE_CMP(name)                                           \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2) << 2;                                            \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
3393

    
3394
/* Merge two per-lane comparison results into one 4-bit field:
 * bit3 = high lane, bit2 = low lane, bit1 = either, bit0 = both. */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    uint32_t either = t0 | t1;
    uint32_t both = t0 & t1;
    return (t0 << 3) | (t1 << 2) | (either << 1) | both;
}
3398

    
3399
/* Wrap a scalar SPE compare as a vector helper: apply it to the high and
 * low 32-bit halves of each 64-bit operand and merge the two results
 * through evcmp_merge (the low-half call relies on implicit truncation
 * of the uint64_t arguments to uint32_t). */
#define HELPER_VECTOR_SPE_CMP(name)                                           \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
3416

    
3417
/* Double-precision floating-point conversion */
3418
/* efdcfsi: convert a signed 32-bit integer to double precision. */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU res;

    res.d = int32_to_float64(val, &env->vec_status);
    return res.ll;
}
3426

    
3427
/* efdcfsid: convert a signed 64-bit integer to double precision. */
uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU res;

    res.d = int64_to_float64(val, &env->vec_status);
    return res.ll;
}
3435

    
3436
/* efdcfui: convert an unsigned 32-bit integer to double precision. */
uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU res;

    res.d = uint32_to_float64(val, &env->vec_status);
    return res.ll;
}
3444

    
3445
/* efdcfuid: convert an unsigned 64-bit integer to double precision. */
uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU res;

    res.d = uint64_to_float64(val, &env->vec_status);
    return res.ll;
}
3453

    
3454
/* efdctsi: double precision -> signed 32-bit integer, current rounding. */
uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* SPE maps NaN to 0 instead of the IEEE 754 default result */
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_int32(in.d, &env->vec_status);
}
3466

    
3467
/* efdctui: double precision -> unsigned 32-bit integer, current rounding. */
uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* SPE maps NaN to 0 instead of the IEEE 754 default result */
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_uint32(in.d, &env->vec_status);
}
3479

    
3480
/* efdctsiz: double precision -> signed 32-bit integer, round toward zero. */
uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* SPE maps NaN to 0 instead of the IEEE 754 default result */
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_int32_round_to_zero(in.d, &env->vec_status);
}
3492

    
3493
/* efdctsidz: double precision -> signed 64-bit integer, round toward zero. */
uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* SPE maps NaN to 0 instead of the IEEE 754 default result */
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_int64_round_to_zero(in.d, &env->vec_status);
}
3505

    
3506
/* efdctuiz: double precision -> unsigned 32-bit integer, round toward zero. */
uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* SPE maps NaN to 0 instead of the IEEE 754 default result */
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_uint32_round_to_zero(in.d, &env->vec_status);
}
3518

    
3519
/* efdctuidz: double precision -> unsigned 64-bit integer, round toward zero. */
uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    /* SPE maps NaN to 0 instead of the IEEE 754 default result */
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_uint64_round_to_zero(in.d, &env->vec_status);
}
3531

    
3532
/* efdcfsf: signed 32-bit fractional -> double precision (scale by 2^-32). */
uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU res;
    float64 scale;

    res.d = int32_to_float64(val, &env->vec_status);
    scale = int64_to_float64(1ULL << 32, &env->vec_status);
    res.d = float64_div(res.d, scale, &env->vec_status);
    return res.ll;
}
3543

    
3544
/* efdcfuf: unsigned 32-bit fractional -> double precision (scale by 2^-32). */
uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU res;
    float64 scale;

    res.d = uint32_to_float64(val, &env->vec_status);
    scale = int64_to_float64(1ULL << 32, &env->vec_status);
    res.d = float64_div(res.d, scale, &env->vec_status);
    return res.ll;
}
3555

    
3556
/* efdctsf: double precision -> signed 32-bit fractional (scale by 2^32). */
uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU in;
    float64 scale;

    in.ll = val;
    /* SPE maps NaN to 0 instead of the IEEE 754 default result */
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    scale = uint64_to_float64(1ULL << 32, &env->vec_status);
    in.d = float64_mul(in.d, scale, &env->vec_status);
    return float64_to_int32(in.d, &env->vec_status);
}
3571

    
3572
/* efdctuf: double precision -> unsigned 32-bit fractional (scale by 2^32). */
uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU in;
    float64 scale;

    in.ll = val;
    /* SPE maps NaN to 0 instead of the IEEE 754 default result */
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    scale = uint64_to_float64(1ULL << 32, &env->vec_status);
    in.d = float64_mul(in.d, scale, &env->vec_status);
    return float64_to_uint32(in.d, &env->vec_status);
}
3587

    
3588
/* efscfd: narrow a double-precision value to single precision. */
uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU src;
    CPU_FloatU dst;

    src.ll = val;
    dst.f = float64_to_float32(src.d, &env->vec_status);
    return dst.l;
}
3598

    
3599
/* efdcfs: widen a single-precision value to double precision. */
uint64_t helper_efdcfs (uint32_t val)
{
    CPU_FloatU src;
    CPU_DoubleU dst;

    src.l = val;
    dst.d = float32_to_float64(src.f, &env->vec_status);
    return dst.ll;
}
3609

    
3610
/* Double precision fixed-point arithmetic */
3611
/* efdadd: double-precision addition. */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_add(a.d, b.d, &env->vec_status);
    return a.ll;
}
3619

    
3620
/* efdsub: double-precision subtraction (op1 - op2). */
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_sub(a.d, b.d, &env->vec_status);
    return a.ll;
}
3628

    
3629
/* efdmul: double-precision multiplication. */
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_mul(a.d, b.d, &env->vec_status);
    return a.ll;
}
3637

    
3638
/* efddiv: double-precision division (op1 / op2). */
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_div(a.d, b.d, &env->vec_status);
    return a.ll;
}
3646

    
3647
/* Double precision floating point helpers */
3648
/* efdtstlt: double-precision less-than test; 4 when op1 < op2, else 0. */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    return float64_lt(a.d, b.d, &env->vec_status) ? 4 : 0;
}
3655

    
3656
/* efdtstgt: double-precision greater-than test, derived as the complement
 * of less-or-equal (unordered operands therefore yield 4). */
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    return float64_le(a.d, b.d, &env->vec_status) ? 0 : 4;
}
3663

    
3664
/* efdtsteq: double-precision equality test using the quiet (non-signaling)
 * comparison; 4 when equal, else 0. */
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    return float64_eq_quiet(a.d, b.d, &env->vec_status) ? 4 : 0;
}
3671

    
3672
/* efdcmplt: currently identical to the test variant; special-value
 * handling is still outstanding. */
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(op1, op2);
}
3677

    
3678
/* efdcmpgt: currently identical to the test variant (see efdcmplt). */
uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(op1, op2);
}
3683

    
3684
/* efdcmpeq: currently identical to the test variant (see efdcmplt). */
uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(op1, op2);
}
3689

    
3690
/*****************************************************************************/
3691
/* Softmmu support */
3692
#if !defined (CONFIG_USER_ONLY)
3693

    
3694
#define MMUSUFFIX _mmu
3695

    
3696
#define SHIFT 0
3697
#include "softmmu_template.h"
3698

    
3699
#define SHIFT 1
3700
#include "softmmu_template.h"
3701

    
3702
#define SHIFT 2
3703
#include "softmmu_template.h"
3704

    
3705
#define SHIFT 3
3706
#include "softmmu_template.h"
3707

    
3708
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        /* Raises the exception set up by the MMU fault handler; this call
         * longjmps out of the function, so saved_env is intentionally not
         * restored on this path. */
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
3739

    
3740
/* Segment registers load and store */
3741
target_ulong helper_load_sr (target_ulong sr_num)
3742
{
3743
#if defined(TARGET_PPC64)
3744
    if (env->mmu_model & POWERPC_MMU_64)
3745
        return ppc_load_sr(env, sr_num);
3746
#endif
3747
    return env->sr[sr_num];
3748
}
3749

    
3750
/* Write a segment register through the MMU-model-aware store routine. */
void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}
3754

    
3755
/* SLB management */
3756
#if defined(TARGET_PPC64)
3757
/* slbmte: write an SLB entry; an invalid RB/RS pair raises a program
 * interrupt with the "invalid operation" error code. */
void helper_store_slb (target_ulong rb, target_ulong rs)
{
    int rc = ppc_store_slb(env, rb, rs);

    if (rc < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
}
3763

    
3764
/* slbmfee: read the ESID half of an SLB entry; an invalid index raises
 * a program interrupt. */
target_ulong helper_load_slb_esid (target_ulong rb)
{
    target_ulong esid;

    if (ppc_load_slb_esid(env, rb, &esid) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
    return esid;
}
3773

    
3774
/* slbmfev: read the VSID half of an SLB entry; an invalid index raises
 * a program interrupt. */
target_ulong helper_load_slb_vsid (target_ulong rb)
{
    target_ulong vsid;

    if (ppc_load_slb_vsid(env, rb, &vsid) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
    return vsid;
}
3783

    
3784
void helper_slbia (void)
3785
{
3786
    ppc_slb_invalidate_all(env);
3787
}
3788

    
3789
/* slbie: invalidate the SLB entry covering the given effective address. */
void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
3793

    
3794
#endif /* defined(TARGET_PPC64) */
3795

    
3796
/* TLB management */
3797
void helper_tlbia (void)
3798
{
3799
    ppc_tlb_invalidate_all(env);
3800
}
3801

    
3802
/* tlbie: invalidate the TLB entry covering the given effective address. */
void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
3806

    
3807
/* Software driven TLBs management */
3808
/* PowerPC 602/603 software TLB load instructions helpers */
3809
/* Load one entry of the 602/603-style software-managed TLB from the
 * RPA and xCMP/xMISS SPRs.  is_code selects the instruction (1) or
 * data (0) TLB set; new_EPN gives the effective page being installed. */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    /* The replacement way selected by hardware is reported in SRR1 bit 17. */
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3831

    
3832
/* tlbld: load a data TLB entry on 602/603. */
void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}
3836

    
3837
/* tlbli: load an instruction TLB entry on 602/603. */
void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}
3841

    
3842
/* PowerPC 74xx software TLB load instructions helpers */
3843
/* Load one entry of the 74xx software-managed TLB from the PTEHI/PTELO
 * and TLBMISS SPRs; the low bits of TLBMISS select the way.  Reuses the
 * 6xx TLB storage backend. */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3860

    
3861
/* tlbld: load a data TLB entry on 74xx. */
void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}
3865

    
3866
/* tlbli: load an instruction TLB entry on 74xx. */
void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}
3870

    
3871
static inline target_ulong booke_tlb_to_page_size(int size)
3872
{
3873
    return 1024 << (2 * size);
3874
}
3875

    
3876
/* Encode a page size in bytes back into the BookE TSIZE field (4^n KiB).
 * Only exact power-of-4 KiB sizes have an encoding; anything else
 * returns -1 so callers can substitute a default. */
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined (TARGET_PPC64)
    /* Sizes of 4 GiB and up only fit in a 64-bit target_ulong. */
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        /* Not an encodable size. */
        size = -1;
        break;
    }

    return size;
}
3938

    
3939
/* Helpers for 4xx TLB management */
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

/* TLBHI (tag word) fields: valid bit, little-endian bit and the 3-bit
 * page-size field. */
#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

/* TLBLO (data word) fields: execute/write permissions, storage
 * attributes and the real page number. */
#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00

    
3955
/* tlbre (TLBHI word): rebuild the tag word of a 4xx TLB entry from the
 * soft-TLB state.  Side effect: also loads the entry's PID into
 * SPR_40x_PID, matching the 4xx tlbre behavior. */
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    /* Clamp unencodable sizes to the default rather than returning -1. */
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}
3975

    
3976
/* tlbre (TLBLO word): rebuild the data word of a 4xx TLB entry (RPN plus
 * execute/write permission bits).
 * NOTE(review): tlb->attr is not merged back into the returned word —
 * confirm against the 4xx TLBLO layout whether the attribute bits
 * should be reported here. */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}
3992

    
3993
/* tlbwe (TLBHI word): update the tag word of a 4xx TLB entry.  Flushes
 * the QEMU TLB pages covered by the old mapping before the update and by
 * the new mapping afterwards, and latches the current SPR_40x_PID into
 * the entry. */
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    /* The EPN is aligned down to the entry's page size. */
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
}
4051

    
4052
/* tlbwe (TLBLO word): update the data word of a 4xx TLB entry — storage
 * attributes, RPN and the read/write/execute protection bits (read is
 * always granted). */
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    /* Protection is rebuilt from scratch; note the VALID bit lives in the
     * TLBHI word and is not touched here. */
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
4077

    
4078
/* tlbsx: search the soft TLB for the entry matching address under the
 * current 40x PID. */
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    target_ulong pid = env->spr[SPR_40x_PID];

    return ppcemb_tlb_search(env, address, pid);
}
4082

    
4083
/* PowerPC 440 TLB management */
4084
/* 440 tlbwe: write one of the three words of a TLB entry.
 * word 0: EPN, page size, attribute bit 0 and the valid bit;
 * word 1: RPN;
 * word 2: storage attributes plus user/supervisor R/W/X permissions
 *         (supervisor bits are stored shifted left by 4).
 * The QEMU TLB is flushed whenever an update may invalidate cached
 * translations. */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        /* A changed EPN on a valid entry invalidates old translations. */
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        /* Only a shrinking page can leave stale translations behind. */
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        /* The entry's translation ID comes from MMUCR's TID field. */
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        /* Rebuild permissions, preserving only the VALID bit. */
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}
4145

    
4146
/* 440 tlbre: read back one of the three words of a TLB entry, the
 * inverse of helper_440_tlbwe.  Reading word 0 also deposits the
 * entry's PID into MMUCR's TID field, as the hardware does. */
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        /* Unencodable sizes fall back to code 1 (4 KiB). */
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        /* Supervisor permissions are stored shifted left by 4. */
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}
4191

    
4192
/* 440 tlbsx: search the soft TLB for address under the TID held in the
 * low byte of MMUCR. */
target_ulong helper_440_tlbsx (target_ulong address)
{
    target_ulong tid = env->spr[SPR_440_MMUCR] & 0xFF;

    return ppcemb_tlb_search(env, address, tid);
}
4196

    
4197
/* PowerPC BookE 2.06 TLB management */
4198

    
4199
/* Resolve the TLB entry currently addressed by the MAS registers:
 * MAS0 selects the TLB array (TLBSEL) and entry (ESEL), MAS2 supplies
 * the effective address used for way selection. */
static ppcemb_tlb_t *booke206_cur_tlb(CPUState *env)
{
    uint32_t tlbncfg = 0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    /* Hardware entry selection is not implemented. */
    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env, "we don't support HES yet\n");
    }

    return booke206_get_tlbe(env, tlb, ea, esel);
}
4215

    
4216
static inline target_phys_addr_t booke206_tlb_to_page_size(int size)
4217
{
4218
    return (1 << (size << 1)) << 10;
4219
}
4220

    
4221
/* Encode a page size in bytes (assumed to be 4^n KiB) into the BookE 2.06
 * TSIZE field.  Computed with a width-safe log2 loop: the previous
 * 'ffs(size >> 10)' passed a uint64_t to ffs(int), truncating page sizes
 * of 4 GiB and above to zero. */
static inline target_phys_addr_t booke206_page_size_to_tlb(uint64_t size)
{
    int shift = 0;

    size >>= 10;                /* sizes are multiples of 1 KiB */
    while (size > 1) {
        size >>= 1;
        shift++;
    }
    return shift >> 1;          /* TSIZE counts in powers of 4 */
}
4225

    
4226
/* Write one of the BookE PID registers.  A PID change switches the
 * current address space, so all cached translations are dropped. */
void helper_booke_setpid(uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    tlb_flush(env, 1);
}
4232

    
4233
/* BookE 2.06 tlbwe: write the TLB entry described by the MAS registers.
 * MAS0 selects array/entry and write semantics, MAS1 carries valid/TS/
 * TID/TSIZE, MAS2 the EPN and WIMGE attributes, MAS3/MAS7 the RPN and
 * permissions.  Finishes by flushing the affected translations. */
void helper_booke206_tlbwe(void)
{
    uint32_t tlbncfg, tlbn;
    ppcemb_tlb_t *tlb;
    target_phys_addr_t rpn;
    int tlbe_size;

    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        if (0) {
            return;
        }
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
         !msr_gs) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);

    if (msr_gs) {
        cpu_abort(env, "missing HV implementation\n");
    } else {
        /* 36-bit real page number: MAS7 holds the high bits. */
        rpn = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
              (env->spr[SPR_BOOKE_MAS3] & 0xfffff000);
    }
    tlb->RPN = rpn;

    tlb->PID = (env->spr[SPR_BOOKE_MAS1] & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
    if (tlbncfg & TLBnCFG_AVAIL) {
        /* variable-size array: size is taken from MAS1 TSIZE */
        tlbe_size = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK)
                    >> MAS1_TSIZE_SHIFT;
    } else {
        /* fixed-size array: size comes from the TLB configuration */
        tlbe_size = (tlbncfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
    }

    tlb->size = booke206_tlb_to_page_size(tlbe_size);
    tlb->EPN = (uint32_t)(env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    /* NOTE(review): '<<' binds tighter than '&', so this evaluates as
     * MAS2 & (flags << 1), not (MAS2 & flags) << 1.  booke206_tlb_to_mas
     * reads these bits back as (attr >> 1) & flags — verify the intended
     * bit placement; this looks like a precedence bug. */
    tlb->attr = env->spr[SPR_BOOKE_MAS2] & (MAS2_ACM | MAS2_VLE | MAS2_W |
                                            MAS2_I | MAS2_M | MAS2_G | MAS2_E)
                << 1;

    if (tlbncfg & TLBnCFG_IPROT) {
        tlb->attr |= env->spr[SPR_BOOKE_MAS1] & MAS1_IPROT;
    }
    /* NOTE(review): same precedence concern — this is MAS3 & (Ux << 8),
     * not (MAS3 & Ux) << 8.  Confirm the intended attr layout for the
     * user-defined bits. */
    tlb->attr |= (env->spr[SPR_BOOKE_MAS3] &
                  ((MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3)) << 8);
    if (env->spr[SPR_BOOKE_MAS1] & MAS1_TS) {
        /* attr bit 0 records the translation space (TS). */
        tlb->attr |= 1;
    }

    /* Rebuild permissions: user bits in the low nibble, supervisor bits
     * shifted left by 4. */
    tlb->prot = 0;

    if (env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) {
        tlb->prot |= PAGE_VALID;
    }
    if (env->spr[SPR_BOOKE_MAS3] & MAS3_UX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (env->spr[SPR_BOOKE_MAS3] & MAS3_SX) {
        tlb->prot |= PAGE_EXEC << 4;
    }
    if (env->spr[SPR_BOOKE_MAS3] & MAS3_UW) {
        tlb->prot |= PAGE_WRITE;
    }
    if (env->spr[SPR_BOOKE_MAS3] & MAS3_SW) {
        tlb->prot |= PAGE_WRITE << 4;
    }
    if (env->spr[SPR_BOOKE_MAS3] & MAS3_UR) {
        tlb->prot |= PAGE_READ;
    }
    if (env->spr[SPR_BOOKE_MAS3] & MAS3_SR) {
        tlb->prot |= PAGE_READ << 4;
    }

    /* Flush only the single page when possible, else everything. */
    if (tlb->size == TARGET_PAGE_SIZE) {
        tlb_flush_page(env, tlb->EPN);
    } else {
        tlb_flush(env, 1);
    }
}
4331

    
4332
/* Populate the MAS registers from a TLB entry — the read-back path used
 * by tlbre and a successful tlbsx. */
static inline void booke206_tlb_to_mas(CPUState *env, ppcemb_tlb_t *tlb)
{
    int tlbn = booke206_tlbe_to_tlbn(env, tlb);
    int way = booke206_tlbe_to_way(env, tlb);

    /* MAS0: which array and which entry within it. */
    env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;

    env->spr[SPR_BOOKE_MAS1] = MAS1_VALID;
    env->spr[SPR_BOOKE_MAS2] = 0;

    /* MAS7 carries the high 32 bits of the real page number. */
    env->spr[SPR_BOOKE_MAS7] = (uint64_t)tlb->RPN >> 32;
    env->spr[SPR_BOOKE_MAS3] = tlb->RPN;
    env->spr[SPR_BOOKE_MAS1] |= tlb->PID << MAS1_TID_SHIFT;
    env->spr[SPR_BOOKE_MAS1] |= booke206_page_size_to_tlb(tlb->size)
                                << MAS1_TSIZE_SHIFT;
    env->spr[SPR_BOOKE_MAS1] |= tlb->attr & MAS1_IPROT;
    if (tlb->attr & 1) {
        /* attr bit 0 holds the translation space (TS). */
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    /* MAS2: EPN plus the WIMGE attributes stored shifted by 1 in attr. */
    env->spr[SPR_BOOKE_MAS2] = tlb->EPN;
    env->spr[SPR_BOOKE_MAS2] |= (tlb->attr >> 1) &
        (MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E);

    /* MAS3 permission bits: user from the low nibble of prot, supervisor
     * from the nibble shifted left by 4. */
    if (tlb->prot & PAGE_EXEC) {
        env->spr[SPR_BOOKE_MAS3] |= MAS3_UX;
    }
    if (tlb->prot & (PAGE_EXEC << 4)) {
        env->spr[SPR_BOOKE_MAS3] |= MAS3_SX;
    }
    if (tlb->prot & PAGE_WRITE) {
        env->spr[SPR_BOOKE_MAS3] |= MAS3_UW;
    }
    if (tlb->prot & (PAGE_WRITE << 4)) {
        env->spr[SPR_BOOKE_MAS3] |= MAS3_SW;
    }
    if (tlb->prot & PAGE_READ) {
        env->spr[SPR_BOOKE_MAS3] |= MAS3_UR;
    }
    if (tlb->prot & (PAGE_READ << 4)) {
        env->spr[SPR_BOOKE_MAS3] |= MAS3_SR;
    }

    /* Report the next victim in MAS0's NV field. */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
4378

    
4379
void helper_booke206_tlbre(void)
4380
{
4381
    ppcemb_tlb_t *tlb = NULL;
4382

    
4383
    tlb = booke206_cur_tlb(env);
4384
    booke206_tlb_to_mas(env, tlb);
4385
}
4386

    
4387
/*
 * tlbsx: search every TLB array for an entry translating 'address' under
 * the PID/AS given in MAS6.  On a hit the entry is reflected into the
 * MAS registers; on a miss the MAS registers are loaded with the
 * defaults from MAS4/MAS6 plus a next-victim way selection.
 */
void helper_booke206_tlbsx(target_ulong address)
{
    target_phys_addr_t raddr;
    uint32_t spid, sas;
    int tlbn, way;

    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (tlbn = 0; tlbn < BOOKE206_MAX_TLBN; tlbn++) {
        int nr_ways = booke206_tlb_ways(env, tlbn);

        for (way = 0; way < nr_ways; way++) {
            ppcemb_tlb_t *tlb = booke206_get_tlbe(env, tlbn, address, way);

            /* The entry must both translate the address under 'spid' and
             * live in the address space requested by MAS6[SAS]. */
            if (!ppcemb_tlb_check(env, tlb, &raddr, address, spid, 0, way) &&
                sas == (tlb->attr & MAS6_SAS)) {
                booke206_tlb_to_mas(env, tlb);
                return;
            }
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    /* Propagate the searched PID from MAS6 into MAS1[TID]. */
    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way = (env->last_way + 1) & (booke206_tlb_ways(env, 0) - 1);
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
4436

    
4437
static inline void booke206_invalidate_ea_tlb(CPUState *env, int tlbn,
4438
                                              uint32_t ea)
4439
{
4440
    int i;
4441
    int ways = booke206_tlb_ways(env, tlbn);
4442

    
4443
    for (i = 0; i < ways; i++) {
4444
        ppcemb_tlb_t *tlb = booke206_get_tlbe(env, tlbn, ea, i);
4445
        target_phys_addr_t masked_ea = ea & ~(tlb->size - 1);
4446
        if ((tlb->EPN == (masked_ea >> MAS2_EPN_SHIFT)) &&
4447
            !(tlb->attr & MAS1_IPROT)) {
4448
            tlb->prot = 0;
4449
        }
4450
    }
4451
}
4452

    
4453
/*
 * tlbivax: invalidate TLB entries.  Bit 3 of the effective address
 * selects TLB1 over TLB0; bit 2 requests an invalidate-all of the
 * selected array instead of a single-address invalidation.
 */
void helper_booke206_tlbivax(target_ulong address)
{
    int tlb1 = (address & 0x8) != 0;

    if (address & 0x4) {
        /* flush all entries of the selected array */
        if (tlb1) {
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    /* single-address invalidation in the selected array */
    booke206_invalidate_ea_tlb(env, tlb1 ? 1 : 0, address);
    if (tlb1) {
        tlb_flush(env, 1);
    } else {
        tlb_flush_page(env, address & MAS2_EPN_MASK);
    }
}
4477

    
4478
/* tlbflush helper: bit 1 of 'type' selects TLB1, bit 2 selects TLB0;
 * the chosen arrays are flushed globally. */
void helper_booke206_tlbflush(uint32_t type)
{
    int flags = ((type & 2) ? BOOKE206_FLUSH_TLB1 : 0) |
                ((type & 4) ? BOOKE206_FLUSH_TLB0 : 0);

    booke206_flush_tlb(env, flags, 1);
}
4492

    
4493
#endif /* !CONFIG_USER_ONLY */