Statistics
| Branch: | Revision:

root / target-ppc / op_helper.c @ dcfd14b3

History | View | Annotate | Download (134.3 kB)

1
/*
2
 *  PowerPC emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2003-2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <string.h>
20
#include "exec.h"
21
#include "host-utils.h"
22
#include "helper.h"
23

    
24
#include "helper_regs.h"
25

    
26
//#define DEBUG_OP
27
//#define DEBUG_EXCEPTIONS
28
//#define DEBUG_SOFTWARE_TLB
29

    
30
#ifdef DEBUG_SOFTWARE_TLB
31
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
32
#else
33
#  define LOG_SWTLB(...) do { } while (0)
34
#endif
35

    
36

    
37
/*****************************************************************************/
38
/* Exceptions processing helpers */
39

    
40
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
41
{
42
#if 0
43
    printf("Raise exception %3x code : %d\n", exception, error_code);
44
#endif
45
    env->exception_index = exception;
46
    env->error_code = error_code;
47
    cpu_loop_exit();
48
}
49

    
50
void helper_raise_exception (uint32_t exception)
51
{
52
    helper_raise_exception_err(exception, 0);
53
}
54

    
55
/*****************************************************************************/
56
/* SPR accesses */
57
void helper_load_dump_spr (uint32_t sprn)
58
{
59
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
60
             env->spr[sprn]);
61
}
62

    
63
void helper_store_dump_spr (uint32_t sprn)
64
{
65
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
66
             env->spr[sprn]);
67
}
68

    
69
target_ulong helper_load_tbl (void)
70
{
71
    return (target_ulong)cpu_ppc_load_tbl(env);
72
}
73

    
74
target_ulong helper_load_tbu (void)
75
{
76
    return cpu_ppc_load_tbu(env);
77
}
78

    
79
target_ulong helper_load_atbl (void)
80
{
81
    return (target_ulong)cpu_ppc_load_atbl(env);
82
}
83

    
84
target_ulong helper_load_atbu (void)
85
{
86
    return cpu_ppc_load_atbu(env);
87
}
88

    
89
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
90
target_ulong helper_load_purr (void)
91
{
92
    return (target_ulong)cpu_ppc_load_purr(env);
93
}
94
#endif
95

    
96
target_ulong helper_load_601_rtcl (void)
97
{
98
    return cpu_ppc601_load_rtcl(env);
99
}
100

    
101
target_ulong helper_load_601_rtcu (void)
102
{
103
    return cpu_ppc601_load_rtcu(env);
104
}
105

    
106
#if !defined(CONFIG_USER_ONLY)
107
#if defined (TARGET_PPC64)
108
void helper_store_asr (target_ulong val)
109
{
110
    ppc_store_asr(env, val);
111
}
112
#endif
113

    
114
void helper_store_sdr1 (target_ulong val)
115
{
116
    ppc_store_sdr1(env, val);
117
}
118

    
119
/* Write the time-base lower word. */
void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

/* Write the time-base upper word. */
void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

/* Write the alternate time-base lower word. */
void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

/* Write the alternate time-base upper word. */
void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

/* Write the PowerPC 601 real-time clock, lower word. */
void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

/* Write the PowerPC 601 real-time clock, upper word. */
void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}
148

    
149
target_ulong helper_load_decr (void)
150
{
151
    return cpu_ppc_load_decr(env);
152
}
153

    
154
void helper_store_decr (target_ulong val)
155
{
156
    cpu_ppc_store_decr(env, val);
157
}
158

    
159
/* Write HID0 on the PowerPC 601.  Bit 0x8 (little-endian mode) is mirrored
 * into the MSR_LE position of the non-MSR hflags so translation picks up
 * the endianness switch.
 */
void helper_store_hid0_601 (target_ulong val)
{
    target_ulong old_hid0 = env->spr[SPR_HID0];

    if ((val ^ old_hid0) & 0x00000008) {
        /* The LM bit toggled: switch the emulated endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
                 val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}
175

    
176
/* Write a PowerPC 403 protection bound register; a change invalidates the
 * whole TLB since the protection ranges moved.
 */
void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}
184

    
185
target_ulong helper_load_40x_pit (void)
186
{
187
    return load_40x_pit(env);
188
}
189

    
190
void helper_store_40x_pit (target_ulong val)
191
{
192
    store_40x_pit(env, val);
193
}
194

    
195
void helper_store_40x_dbcr0 (target_ulong val)
196
{
197
    store_40x_dbcr0(env, val);
198
}
199

    
200
void helper_store_40x_sler (target_ulong val)
201
{
202
    store_40x_sler(env, val);
203
}
204

    
205
/* Write the BookE timer control register. */
void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

/* Write the BookE timer status register. */
void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}
214

    
215
/* Write instruction BAT upper half. */
void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

/* Write instruction BAT lower half. */
void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

/* Write data BAT upper half. */
void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

/* Write data BAT lower half. */
void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

/* Write 601-style unified BAT lower half. */
void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

/* Write 601-style unified BAT upper half. */
void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif
245

    
246
/*****************************************************************************/
247
/* Memory load and stores */
248

    
249
/* Compute addr + arg, wrapping the result to 32 bits when the CPU is not
 * running in 64-bit (MSR[SF]) mode.
 */
static inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf) {
        return (uint32_t)(addr + arg);
    }
#endif
    return addr + arg;
}
258

    
259
/* lmw: load words from addr into GPRs reg..31, byte-swapping when the CPU
 * runs little-endian.
 */
void helper_lmw (target_ulong addr, uint32_t reg)
{
    while (reg < 32) {
        uint32_t word = ldl(addr);

        env->gpr[reg] = msr_le ? bswap32(word) : word;
        addr = addr_add(addr, 4);
        reg++;
    }
}
269

    
270
/* stmw: store GPRs reg..31 as words at addr, byte-swapping when the CPU
 * runs little-endian.
 */
void helper_stmw (target_ulong addr, uint32_t reg)
{
    while (reg < 32) {
        uint32_t word = (uint32_t)env->gpr[reg];

        stl(addr, msr_le ? bswap32(word) : word);
        addr = addr_add(addr, 4);
        reg++;
    }
}
280

    
281
/* lsw: load nb bytes starting at addr into consecutive GPRs (wrapping from
 * r31 to r0).  Full words first; any 1-3 trailing bytes are left-aligned in
 * the final register with the remainder zeroed.
 */
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int shift;

    while (nb > 3) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (shift = 24; nb > 0; nb--, shift -= 8) {
            env->gpr[reg] |= ldub(addr) << shift;
            addr = addr_add(addr, 1);
        }
    }
}
297
/* PPC32 specification says we must generate an exception if
298
 * rA is in the range of registers to be loaded.
299
 * In an other hand, IBM says this is valid, but rA won't be loaded.
300
 * For now, I'll follow the spec...
301
 */
302
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
303
{
304
    if (likely(xer_bc != 0)) {
305
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
306
                     (reg < rb && (reg + xer_bc) > rb))) {
307
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
308
                                       POWERPC_EXCP_INVAL |
309
                                       POWERPC_EXCP_INVAL_LSWX);
310
        } else {
311
            helper_lsw(addr, xer_bc, reg);
312
        }
313
    }
314
}
315

    
316
/* stsw: store nb bytes from consecutive GPRs (wrapping r31->r0) at addr.
 * Full words first; 1-3 trailing bytes are taken from the top of the last
 * register.
 */
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int shift;

    while (nb > 3) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    if (unlikely(nb > 0)) {
        for (shift = 24; nb > 0; nb--, shift -= 8) {
            stb(addr, (env->gpr[reg] >> shift) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}
331

    
332
/* Zero one data-cache line at addr (aligned down) and drop any pending
 * lwarx/ldarx reservation that covered it.
 */
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int offset;

    addr &= ~(dcache_line_size - 1);
    for (offset = 0; offset < dcache_line_size; offset += 4) {
        stl(addr + offset, 0);
    }
    if (env->reserve_addr == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }
}
342

    
343
/* dcbz: zero a cache line of the CPU's configured size. */
void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}
347

    
348
/* dcbz on the 970: HID5[DCBZ_SIZE]==1 selects a 32-byte line, otherwise the
 * configured line size is used.
 */
void helper_dcbz_970(target_ulong addr)
{
    int line = (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
               ? 32 : env->dcache_line_size;

    do_dcbz(addr, line);
}
355

    
356
/* icbi: invalidate one instruction-cache line. */
void helper_icbi(target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    ldl(addr);
}
366

    
367
// XXX: to be tested
368
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
369
{
370
    int i, c, d;
371
    d = 24;
372
    for (i = 0; i < xer_bc; i++) {
373
        c = ldub(addr);
374
        addr = addr_add(addr, 1);
375
        /* ra (if not 0) and rb are never modified */
376
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
377
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
378
        }
379
        if (unlikely(c == xer_cmp))
380
            break;
381
        if (likely(d != 0)) {
382
            d -= 8;
383
        } else {
384
            d = 24;
385
            reg++;
386
            reg = reg & 0x1F;
387
        }
388
    }
389
    return i;
390
}
391

    
392
/*****************************************************************************/
393
/* Fixed point operations helpers */
394
#if defined(TARGET_PPC64)

/* mulhd: high 64 bits of a signed 64x64 multiply. */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t lo, hi;

    muls64(&lo, &hi, arg1, arg2);
    return hi;
}

/* mulhdu: high 64 bits of an unsigned 64x64 multiply. */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t lo, hi;

    mulu64(&lo, &hi, arg1, arg2);
    return hi;
}

/* mulldo: low 64 bits of a signed multiply; sets XER[OV]/[SO] when the
 * full product does not fit in 64 bits.
 */
uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* No overflow iff the high word is 0 or -1 (i.e. th + 1 is 0 or 1) */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif
429

    
430
/* cntlzw: count leading zeros in the low 32 bits. */
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}
434

    
435
#if defined(TARGET_PPC64)
/* cntlzd: count leading zeros in 64 bits. */
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif
441

    
442
/* shift right arithmetic helper */
443
target_ulong helper_sraw (target_ulong value, target_ulong shift)
444
{
445
    int32_t ret;
446

    
447
    if (likely(!(shift & 0x20))) {
448
        if (likely((uint32_t)shift != 0)) {
449
            shift &= 0x1f;
450
            ret = (int32_t)value >> shift;
451
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
452
                env->xer &= ~(1 << XER_CA);
453
            } else {
454
                env->xer |= (1 << XER_CA);
455
            }
456
        } else {
457
            ret = (int32_t)value;
458
            env->xer &= ~(1 << XER_CA);
459
        }
460
    } else {
461
        ret = (int32_t)value >> 31;
462
        if (ret) {
463
            env->xer |= (1 << XER_CA);
464
        } else {
465
            env->xer &= ~(1 << XER_CA);
466
        }
467
    }
468
    return (target_long)ret;
469
}
470

    
471
#if defined(TARGET_PPC64)
/* srad: 64-bit arithmetic shift right.
 * Sets XER[CA] when the (negative) result had one-bits shifted out; clears
 * it otherwise.  A shift amount with bit 0x40 set yields 0 or -1 depending
 * on the sign of value.
 * Fix: the lost-bits mask must be built from a 64-bit constant — the old
 * "(1 << shift) - 1" shifted a 32-bit int by up to 63 places, which is
 * undefined behavior and computes a wrong mask for shift >= 31, so CA
 * could be set or cleared incorrectly.
 */
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            if (likely(ret >= 0 ||
                       (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif
500

    
501
#if defined(TARGET_PPC64)
502
target_ulong helper_popcntb (target_ulong val)
503
{
504
    val = (val & 0x5555555555555555ULL) + ((val >>  1) &
505
                                           0x5555555555555555ULL);
506
    val = (val & 0x3333333333333333ULL) + ((val >>  2) &
507
                                           0x3333333333333333ULL);
508
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) &
509
                                           0x0f0f0f0f0f0f0f0fULL);
510
    return val;
511
}
512

    
513
target_ulong helper_popcntw (target_ulong val)
514
{
515
    val = (val & 0x5555555555555555ULL) + ((val >>  1) &
516
                                           0x5555555555555555ULL);
517
    val = (val & 0x3333333333333333ULL) + ((val >>  2) &
518
                                           0x3333333333333333ULL);
519
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) &
520
                                           0x0f0f0f0f0f0f0f0fULL);
521
    val = (val & 0x00ff00ff00ff00ffULL) + ((val >>  8) &
522
                                           0x00ff00ff00ff00ffULL);
523
    val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
524
                                           0x0000ffff0000ffffULL);
525
    return val;
526
}
527

    
528
target_ulong helper_popcntd (target_ulong val)
529
{
530
    return ctpop64(val);
531
}
532
#else
533
target_ulong helper_popcntb (target_ulong val)
534
{
535
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
536
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
537
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
538
    return val;
539
}
540

    
541
target_ulong helper_popcntw (target_ulong val)
542
{
543
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
544
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
545
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
546
    val = (val & 0x00ff00ff) + ((val >>  8) & 0x00ff00ff);
547
    val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
548
    return val;
549
}
550
#endif
551

    
552
/*****************************************************************************/
553
/* Floating point operations helpers */
554
/* Widen a raw 32-bit float image to a raw 64-bit one. */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU in;
    CPU_DoubleU out;

    in.l = arg;
    out.d = float32_to_float64(in.f, &env->fp_status);
    return out.ll;
}

/* Narrow a raw 64-bit float image to a raw 32-bit one (may round). */
uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_DoubleU in;
    CPU_FloatU out;

    in.ll = arg;
    out.f = float64_to_float32(in.d, &env->fp_status);
    return out.l;
}
571

    
572
/* Non-zero when the float64 has a biased exponent of zero, i.e. it is a
 * denormal (or zero — callers exclude zero first).
 */
static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;
    return ((u.ll >> 52) & 0x7FF) == 0;
}
580

    
581
/* Classify a float64 into the 5-bit FPRF code (class + sign), optionally
 * storing it into FPSCR[FPRF].  Returns the low 4 bits (fpcc) for Rc1.
 */
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;

    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_any_nan(farg.d))) {
        /* Signaling NaN: flags are architecturally undefined (0);
         * quiet NaN: 0x11. */
        ret = float64_is_signaling_nan(farg.d) ? 0x00 : 0x11;
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        ret = isneg ? 0x09 : 0x05;
    } else if (float64_is_zero(farg.d)) {
        /* +/- zero */
        ret = isneg ? 0x12 : 0x02;
    } else {
        /* Finite non-zero: denormal class bit, then the sign bit */
        ret = isden(farg.d) ? 0x10 : 0x00;
        ret |= isneg ? 0x08 : 0x04;
    }
    if (set_fprf) {
        /* Update FPSCR[FPRF] */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* Only fpcc is needed to update Rc1 */
    return ret & 0xF;
}
632

    
633
/* Floating-point invalid operations exception */
634
static inline uint64_t fload_invalid_op_excp(int op)
635
{
636
    uint64_t ret = 0;
637
    int ve;
638

    
639
    ve = fpscr_ve;
640
    switch (op) {
641
    case POWERPC_EXCP_FP_VXSNAN:
642
        env->fpscr |= 1 << FPSCR_VXSNAN;
643
        break;
644
    case POWERPC_EXCP_FP_VXSOFT:
645
        env->fpscr |= 1 << FPSCR_VXSOFT;
646
        break;
647
    case POWERPC_EXCP_FP_VXISI:
648
        /* Magnitude subtraction of infinities */
649
        env->fpscr |= 1 << FPSCR_VXISI;
650
        goto update_arith;
651
    case POWERPC_EXCP_FP_VXIDI:
652
        /* Division of infinity by infinity */
653
        env->fpscr |= 1 << FPSCR_VXIDI;
654
        goto update_arith;
655
    case POWERPC_EXCP_FP_VXZDZ:
656
        /* Division of zero by zero */
657
        env->fpscr |= 1 << FPSCR_VXZDZ;
658
        goto update_arith;
659
    case POWERPC_EXCP_FP_VXIMZ:
660
        /* Multiplication of zero by infinity */
661
        env->fpscr |= 1 << FPSCR_VXIMZ;
662
        goto update_arith;
663
    case POWERPC_EXCP_FP_VXVC:
664
        /* Ordered comparison of NaN */
665
        env->fpscr |= 1 << FPSCR_VXVC;
666
        env->fpscr &= ~(0xF << FPSCR_FPCC);
667
        env->fpscr |= 0x11 << FPSCR_FPCC;
668
        /* We must update the target FPR before raising the exception */
669
        if (ve != 0) {
670
            env->exception_index = POWERPC_EXCP_PROGRAM;
671
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
672
            /* Update the floating-point enabled exception summary */
673
            env->fpscr |= 1 << FPSCR_FEX;
674
            /* Exception is differed */
675
            ve = 0;
676
        }
677
        break;
678
    case POWERPC_EXCP_FP_VXSQRT:
679
        /* Square root of a negative number */
680
        env->fpscr |= 1 << FPSCR_VXSQRT;
681
    update_arith:
682
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
683
        if (ve == 0) {
684
            /* Set the result to quiet NaN */
685
            ret = 0x7FF8000000000000ULL;
686
            env->fpscr &= ~(0xF << FPSCR_FPCC);
687
            env->fpscr |= 0x11 << FPSCR_FPCC;
688
        }
689
        break;
690
    case POWERPC_EXCP_FP_VXCVI:
691
        /* Invalid conversion */
692
        env->fpscr |= 1 << FPSCR_VXCVI;
693
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
694
        if (ve == 0) {
695
            /* Set the result to quiet NaN */
696
            ret = 0x7FF8000000000000ULL;
697
            env->fpscr &= ~(0xF << FPSCR_FPCC);
698
            env->fpscr |= 0x11 << FPSCR_FPCC;
699
        }
700
        break;
701
    }
702
    /* Update the floating-point invalid operation summary */
703
    env->fpscr |= 1 << FPSCR_VX;
704
    /* Update the floating-point exception summary */
705
    env->fpscr |= 1 << FPSCR_FX;
706
    if (ve != 0) {
707
        /* Update the floating-point enabled exception summary */
708
        env->fpscr |= 1 << FPSCR_FEX;
709
        if (msr_fe0 != 0 || msr_fe1 != 0)
710
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
711
    }
712
    return ret;
713
}
714

    
715
static inline void float_zero_divide_excp(void)
716
{
717
    env->fpscr |= 1 << FPSCR_ZX;
718
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
719
    /* Update the floating-point exception summary */
720
    env->fpscr |= 1 << FPSCR_FX;
721
    if (fpscr_ze != 0) {
722
        /* Update the floating-point enabled exception summary */
723
        env->fpscr |= 1 << FPSCR_FEX;
724
        if (msr_fe0 != 0 || msr_fe1 != 0) {
725
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
726
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
727
        }
728
    }
729
}
730

    
731
static inline void float_overflow_excp(void)
732
{
733
    env->fpscr |= 1 << FPSCR_OX;
734
    /* Update the floating-point exception summary */
735
    env->fpscr |= 1 << FPSCR_FX;
736
    if (fpscr_oe != 0) {
737
        /* XXX: should adjust the result */
738
        /* Update the floating-point enabled exception summary */
739
        env->fpscr |= 1 << FPSCR_FEX;
740
        /* We must update the target FPR before raising the exception */
741
        env->exception_index = POWERPC_EXCP_PROGRAM;
742
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
743
    } else {
744
        env->fpscr |= 1 << FPSCR_XX;
745
        env->fpscr |= 1 << FPSCR_FI;
746
    }
747
}
748

    
749
static inline void float_underflow_excp(void)
750
{
751
    env->fpscr |= 1 << FPSCR_UX;
752
    /* Update the floating-point exception summary */
753
    env->fpscr |= 1 << FPSCR_FX;
754
    if (fpscr_ue != 0) {
755
        /* XXX: should adjust the result */
756
        /* Update the floating-point enabled exception summary */
757
        env->fpscr |= 1 << FPSCR_FEX;
758
        /* We must update the target FPR before raising the exception */
759
        env->exception_index = POWERPC_EXCP_PROGRAM;
760
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
761
    }
762
}
763

    
764
static inline void float_inexact_excp(void)
765
{
766
    env->fpscr |= 1 << FPSCR_XX;
767
    /* Update the floating-point exception summary */
768
    env->fpscr |= 1 << FPSCR_FX;
769
    if (fpscr_xe != 0) {
770
        /* Update the floating-point enabled exception summary */
771
        env->fpscr |= 1 << FPSCR_FEX;
772
        /* We must update the target FPR before raising the exception */
773
        env->exception_index = POWERPC_EXCP_PROGRAM;
774
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
775
    }
776
}
777

    
778
static inline void fpscr_set_rounding_mode(void)
779
{
780
    int rnd_type;
781

    
782
    /* Set rounding mode */
783
    switch (fpscr_rn) {
784
    case 0:
785
        /* Best approximation (round to nearest) */
786
        rnd_type = float_round_nearest_even;
787
        break;
788
    case 1:
789
        /* Smaller magnitude (round toward zero) */
790
        rnd_type = float_round_to_zero;
791
        break;
792
    case 2:
793
        /* Round toward +infinite */
794
        rnd_type = float_round_up;
795
        break;
796
    default:
797
    case 3:
798
        /* Round toward -infinite */
799
        rnd_type = float_round_down;
800
        break;
801
    }
802
    set_float_rounding_mode(rnd_type, &env->fp_status);
803
}
804

    
805
/* Clear one FPSCR bit; when a rounding-control bit actually changed,
 * resync the softfloat rounding mode.
 */
void helper_fpscr_clrbit (uint32_t bit)
{
    int was_set = (env->fpscr >> bit) & 1;

    env->fpscr &= ~(1 << bit);
    if (was_set == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}
822

    
823
/* Set one FPSCR bit and replay its architectural side effects: maintain
 * the FX/VX/FEX summaries, resync the rounding mode for RN bits, and when
 * a status bit is newly set with its enable on (or an enable bit is newly
 * set with its status on) schedule a program exception.  Only
 * exception_index/error_code are set here; the caller updates Rc1 and
 * raises the exception.
 * Fix: the FPSCR_VX case was missing its break, so setting VX with VE
 * clear fell through into the FPSCR_OX case and could wrongly raise an
 * overflow exception when OE was enabled.
 */
void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            /* Any invalid-operation sub-status also sets the VX summary */
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
937

    
938
/* mtfsf: copy the mask-selected nibbles of arg's low 32 bits into the
 * FPSCR (FEX and VX are recomputed, not copied), then refresh the summary
 * bits, schedule a program exception if an enabled exception is pending,
 * and resync the rounding mode.
 */
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /* Only the 32 LSB of the incoming FPR are used */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    /* FEX and VX cannot be written directly; keep the current values */
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Recompute the VX and FEX summaries */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode();
}
971

    
972
void helper_float_check_status (void)
973
{
974
#ifdef CONFIG_SOFTFLOAT
975
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
976
        (env->error_code & POWERPC_EXCP_FP)) {
977
        /* Differred floating-point exception after target FPR update */
978
        if (msr_fe0 != 0 || msr_fe1 != 0)
979
            helper_raise_exception_err(env->exception_index, env->error_code);
980
    } else {
981
        int status = get_float_exception_flags(&env->fp_status);
982
        if (status & float_flag_divbyzero) {
983
            float_zero_divide_excp();
984
        } else if (status & float_flag_overflow) {
985
            float_overflow_excp();
986
        } else if (status & float_flag_underflow) {
987
            float_underflow_excp();
988
        } else if (status & float_flag_inexact) {
989
            float_inexact_excp();
990
        }
991
    }
992
#else
993
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
994
        (env->error_code & POWERPC_EXCP_FP)) {
995
        /* Differred floating-point exception after target FPR update */
996
        if (msr_fe0 != 0 || msr_fe1 != 0)
997
            helper_raise_exception_err(env->exception_index, env->error_code);
998
    }
999
#endif
1000
}
1001

    
1002
#ifdef CONFIG_SOFTFLOAT
/* Clear the accumulated softfloat exception flags before an FP op. */
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif
1008

    
1009
/* fadd - fadd.
 * Double-precision add with PowerPC invalid-operation detection. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities: inf + (-inf) -> VXISI */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN addition */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1032

    
1033
/* fsub - fsub.
 * Double-precision subtract with PowerPC invalid-operation detection. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities: inf - inf -> VXISI */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN subtraction */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1056

    
1057
/* fmul - fmul.
 * Double-precision multiply; 0 * inf raises VXIMZ. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN multiplication */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1080

    
1081
/* fdiv - fdiv.
 * Double-precision divide; inf/inf raises VXIDI, 0/0 raises VXZDZ. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN division */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
1106

    
1107
/* fabs */
1108
uint64_t helper_fabs (uint64_t arg)
1109
{
1110
    CPU_DoubleU farg;
1111

    
1112
    farg.ll = arg;
1113
    farg.d = float64_abs(farg.d);
1114
    return farg.ll;
1115
}
1116

    
1117
/* fnabs */
1118
uint64_t helper_fnabs (uint64_t arg)
1119
{
1120
    CPU_DoubleU farg;
1121

    
1122
    farg.ll = arg;
1123
    farg.d = float64_abs(farg.d);
1124
    farg.d = float64_chs(farg.d);
1125
    return farg.ll;
1126
}
1127

    
1128
/* fneg */
1129
uint64_t helper_fneg (uint64_t arg)
1130
{
1131
    CPU_DoubleU farg;
1132

    
1133
    farg.ll = arg;
1134
    farg.d = float64_chs(farg.d);
1135
    return farg.ll;
1136
}
1137

    
1138
/* fctiw - fctiw.
 * Convert double to 32-bit signed int using the current rounding mode. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}
1159

    
1160
/* fctiwz - fctiwz.
 * Convert double to 32-bit signed int, rounding toward zero. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}
1181

    
1182
#if defined(TARGET_PPC64)
1183
/* fcfid - fcfid.
 * Convert a 64-bit signed integer to double precision. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}
1190

    
1191
/* fctid - fctid.
 * Convert double to 64-bit signed int using the current rounding mode. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1208

    
1209
/* fctidz - fctidz.
 * Convert double to 64-bit signed int, rounding toward zero. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1226

    
1227
#endif
1228

    
1229
/* Common helper for frin/friz/frip/frim: round to integral value in the
 * given rounding mode, then restore the FPSCR rounding mode. */
static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        /* Temporarily override the rounding mode for this one operation */
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}
1248

    
1249
/* frin: round to integral, ties to even. */
uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}
1253

    
1254
/* friz: round to integral, toward zero. */
uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}
1258

    
1259
/* frip: round to integral, toward +infinity. */
uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}
1263

    
1264
/* frim: round to integral, toward -infinity. */
uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
1268

    
1269
/* fmadd - fmadd.
 * Fused multiply-add (arg1 * arg2 + arg3) computed through a 128-bit
 * intermediate product, as the PowerPC specification defines it, so the
 * product is not rounded before the add. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }

    return farg1.ll;
}
1308

    
1309
/* fmsub - fmsub.
 * Fused multiply-subtract (arg1 * arg2 - arg3) via a 128-bit intermediate
 * product, per the PowerPC specification. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }
    return farg1.ll;
}
1347

    
1348
/* fnmadd - fnmadd.
 * Negated fused multiply-add: -(arg1 * arg2 + arg3).  NaN results are not
 * negated (the sign flip is skipped for NaNs). */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}
1389

    
1390
/* fnmsub - fnmsub.
 * Negated fused multiply-subtract: -(arg1 * arg2 - arg3).  NaN results
 * are not negated. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}
1431

    
1432
/* frsp - frsp.
 * Round a double to single precision (round-trip through float32). */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN rounding to single precision */
       fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
1448

    
1449
/* fsqrt - fsqrt.
 * Double-precision square root; negative nonzero input raises VXSQRT. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN square root */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1467

    
1468
/* fre - fre. */
1469
uint64_t helper_fre (uint64_t arg)
1470
{
1471
    CPU_DoubleU farg;
1472
    farg.ll = arg;
1473

    
1474
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1475
        /* sNaN reciprocal */
1476
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1477
    }
1478
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1479
    return farg.d;
1480
}
1481

    
1482
/* fres - fres.
 * Single-precision reciprocal estimate: 1.0/x computed in double then
 * rounded to single precision. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    /* Round the double result to single precision */
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
1499

    
1500
/* frsqrte  - frsqrte.
 * Reciprocal square root estimate: 1/sqrt(x) in double, then rounded to
 * single precision.  Negative nonzero input raises VXSQRT. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN reciprocal square root */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        /* Round the double result to single precision */
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
1522

    
1523
/* fsel - fsel. */
1524
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1525
{
1526
    CPU_DoubleU farg1;
1527

    
1528
    farg1.ll = arg1;
1529

    
1530
    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_any_nan(farg1.d)) {
1531
        return arg2;
1532
    } else {
1533
        return arg3;
1534
    }
1535
}
1536

    
1537
/* fcmpu: unordered FP compare.  Sets crfD and FPSCR[FPRF] to one of
 * 0x08 (lt), 0x04 (gt), 0x02 (eq), 0x01 (unordered); only an sNaN
 * operand raises VXSNAN. */
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}
1565

    
1566
/* fcmpo: ordered FP compare.  Like fcmpu, but any NaN operand raises an
 * invalid-operation exception: VXSNAN|VXVC for sNaN, VXVC for qNaN. */
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
1599

    
1600
#if !defined (CONFIG_USER_ONLY)
1601
/* Write the MSR; if hreg_store_msr reports a pending exception number,
 * force a TB exit and raise it. */
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}
1609

    
1610
/* Common return-from-interrupt: restore NIP and MSR (masked by msrm).
 * On 64-bit targets, when returning to 32-bit mode (MSR[SF] clear),
 * truncate NIP/MSR to 32 bits; keep_msrh preserves the current MSR
 * upper half in that case. */
static inline void do_rfi(target_ulong nip, target_ulong msr,
                          target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
1638

    
1639
/* rfi: return from interrupt using SRR0/SRR1 (keeps MSR upper half). */
void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 1);
}
1644

    
1645
#if defined(TARGET_PPC64)
1646
/* rfid: 64-bit return from interrupt using SRR0/SRR1. */
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 0);
}
1651

    
1652
/* hrfid: return from hypervisor interrupt using HSRR0/HSRR1. */
void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0x783F0000), 0);
}
1657
#endif
1658
#endif
1659

    
1660
/* tw: trap word.  Raise a TRAP program exception if any condition
 * selected by the 5-bit TO field (flags) holds for the 32-bit compare. */
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    int32_t sa = (int32_t)arg1;
    int32_t sb = (int32_t)arg2;
    uint32_t ua = (uint32_t)arg1;
    uint32_t ub = (uint32_t)arg2;
    int trap;

    trap = (sa < sb && (flags & 0x10)) ||      /* signed less than */
           (sa > sb && (flags & 0x08)) ||      /* signed greater than */
           (sa == sb && (flags & 0x04)) ||     /* equal */
           (ua < ub && (flags & 0x02)) ||      /* unsigned less than */
           (ua > ub && (flags & 0x01));        /* unsigned greater than */

    if (unlikely(trap)) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1670

    
1671
#if defined(TARGET_PPC64)
1672
/* td: trap doubleword.  Raise a TRAP program exception if any condition
 * selected by the TO field (flags) holds for the 64-bit compare. */
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    int64_t sa = (int64_t)arg1;
    int64_t sb = (int64_t)arg2;
    uint64_t ua = (uint64_t)arg1;
    uint64_t ub = (uint64_t)arg2;
    int trap;

    trap = (sa < sb && (flags & 0x10)) ||      /* signed less than */
           (sa > sb && (flags & 0x08)) ||      /* signed greater than */
           (sa == sb && (flags & 0x04)) ||     /* equal */
           (ua < ub && (flags & 0x02)) ||      /* unsigned less than */
           (ua > ub && (flags & 0x01));        /* unsigned greater than */

    if (unlikely(trap)) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1681
#endif
1682

    
1683
/*****************************************************************************/
1684
/* PowerPC 601 specific instructions (POWER bridge) */
1685

    
1686
/* clcs (601): return a cache characteristic selected by arg.
 * 0x0C = icache line size, 0x0D = dcache line size,
 * 0x0E = min of the two, 0x0F = max; anything else is undefined (0). */
target_ulong helper_clcs (uint32_t arg)
{
    target_ulong icl = env->icache_line_size;
    target_ulong dcl = env->dcache_line_size;

    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return icl;
    case 0x0DUL:
        /* Data cache line size */
        return dcl;
    case 0x0EUL:
        /* Minimum cache line size */
        return (icl < dcl) ? icl : dcl;
    case 0x0FUL:
        /* Maximum cache line size */
        return (icl > dcl) ? icl : dcl;
    default:
        /* Undefined */
        return 0;
    }
}
1713

    
1714
/* POWER div: divide the 64-bit value (arg1:MQ) by arg2; quotient is the
 * result, remainder goes to MQ.  Overflow/div-by-zero yields INT32_MIN
 * with MQ cleared. */
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        /* NOTE(review): tmp is unsigned and arg2 is target_ulong, so both
         * the modulo and the division are performed as unsigned 64-bit
         * operations despite the (int32_t) cast on the divisor — confirm
         * this matches intended POWER semantics for negative operands. */
        env->spr[SPR_MQ] = tmp % arg2;
        return  tmp / (int32_t)arg2;
    }
}
1727

    
1728
/* POWER divo: like div, but also sets XER[OV]/XER[SO] when the quotient
 * does not fit in 32 bits or on divide-by-zero/overflow. */
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        /* NOTE(review): unsigned 64-bit modulo/divide despite the cast on
         * the divisor — see helper_div; confirm against POWER semantics. */
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            /* Quotient does not fit in 32 bits */
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}
1748

    
1749
/* POWER divs: 32-bit signed divide, remainder to MQ.  Overflow or
 * divide-by-zero yields INT32_MIN with MQ cleared. */
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
1760

    
1761
/* POWER divso: like divs, but sets XER[OV]/XER[SO] on overflow or
 * divide-by-zero, and clears OV otherwise. */
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
1774

    
1775
#if !defined (CONFIG_USER_ONLY)
1776
/* rac (POWER): translate an effective address to a real address, with
 * BAT translation temporarily disabled.  Returns 0 on translation
 * failure. */
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    /* Restore the saved BAT count */
    env->nb_BATs = nb_BATs;
    return ret;
}
1793

    
1794
/* rfsvc (POWER): return from SVC; NIP from LR, MSR from CTR. */
void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
1798
#endif
1799

    
1800
/*****************************************************************************/
1801
/* 602 specific instructions */
1802
/* mfrom is the most crazy instruction ever seen, imho ! */
1803
/* Real implementation uses a ROM table. Do the same */
1804
/* Extremly decomposed:
1805
 *                      -arg / 256
1806
 * return 256 * log10(10           + 1.0) + 0.5
1807
 */
1808
#if !defined (CONFIG_USER_ONLY)
1809
/* 602 mfrom: table lookup of 256*log10(10^(-arg/256) + 1.0) + 0.5,
 * mirroring the hardware's ROM table; out-of-range input returns 0. */
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
1818
#endif
1819

    
1820
/*****************************************************************************/
1821
/* Embedded PowerPC specific helpers */
1822

    
1823
/* XXX: to be improved to check access rights when in user-mode */
1824
/* Read a Device Control Register.  Raises a program exception if the CPU
 * has no DCR environment or if the DCR read fails. */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    uint32_t val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) {
        /* dcrn is logged twice intentionally: once decimal, once hex */
        qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}
1839

    
1840
/* Write a Device Control Register.  Raises a program exception if the CPU
 * has no DCR environment or if the DCR write fails. */
void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val) != 0)) {
        /* dcrn is logged twice intentionally: once decimal, once hex */
        qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
1852

    
1853
#if !defined(CONFIG_USER_ONLY)
1854
/* 40x rfci: return from critical interrupt using SRR2/SRR3. */
void helper_40x_rfci (void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}
1859

    
1860
void helper_rfci (void)
1861
{
1862
    do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1863
           ~((target_ulong)0x3FFF0000), 0);
1864
}
1865

    
1866
void helper_rfdi (void)
1867
{
1868
    do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1869
           ~((target_ulong)0x3FFF0000), 0);
1870
}
1871

    
1872
void helper_rfmci (void)
1873
{
1874
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1875
           ~((target_ulong)0x3FFF0000), 0);
1876
}
1877
#endif
1878

    
1879
/* 440 specific */
1880
/* PowerPC 440 dlmzb: determine the number of bytes preceding the
 * leftmost zero byte in the 8-byte string HIGH || LOW.  The count is
 * written to XER[57:63] and returned; when update_Rc is non-zero CR0
 * is set to 0x4 (zero byte in HIGH), 0x8 (zero byte in LOW) or 0x2
 * (no zero byte), ORed with XER[SO].  */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    /* Scan the four bytes of the high word, most significant first.  */
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    /* Then the four bytes of the low word.  */
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    /* Bug fix: without this clamp, falling through both loops leaves
     * i == 9, but only 8 bytes were examined; the architected result
     * when no zero byte is found is 8.  */
    i = 8;
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    /* Byte count lives in the low 7 bits of XER.  */
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
1914

    
1915
/*****************************************************************************/
1916
/* Altivec extension helpers */
1917
/* Host-memory index of the most/least significant half of a 2-element
 * pair, depending on host endianness.  */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/* Iterate over the elements of vector register r in target
 * (big-endian) order regardless of host endianness; `r` must be in
 * scope at the expansion site.  */
#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif
1932

    
1933
/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                \
    if (float32_is_any_nan(x)) {                                \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

/* Propagate a quietened NaN from whichever of 1, 2 or 3 operands is a
 * NaN; the statement attached to the macro only runs when none is.  */
#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
1949

    
1950
/* Saturating arithmetic helpers.  */
/* cvt<from><to>(): narrow X to TO_TYPE, clamping to [MIN, MAX] and
 * setting *SAT on saturation.  SATCVT checks both bounds (signed
 * source); SATCVTU checks the upper bound only (unsigned source).  */
#define SATCVT(from, to, from_type, to_type, min, max)                  \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x < (from_type)min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (x > (from_type)max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
#define SATCVTU(from, to, from_type, to_type, min, max)                 \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x > (from_type)max) {                                       \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
/* Signed -> narrower signed.  */
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

/* Unsigned -> narrower unsigned.  */
SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
/* Signed -> unsigned: negative inputs saturate to 0.  */
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU
1990

    
1991
/* Load-vector-element helpers (lvebx/lvehx/lvewx): load one element
 * from memory into the element slot selected by the low bits of the
 * effective address, byte-swapping the data when MSR[LE] is set.  */
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if(msr_le) {                                            \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
/* Identity "swap" for byte accesses.  */
#define I(x) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE
2010

    
2011
/* lvsl: build a permute control vector whose bytes are the
 * consecutive values (sh & 0xf), (sh & 0xf) + 1, ... in target
 * element order.  */
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int val = sh & 0xf;

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = val;
        val++;
    }
}
2019

    
2020
/* lvsr: build a permute control vector whose bytes are the
 * consecutive values starting at 0x10 - (sh & 0xf), in target element
 * order.  */
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int val = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = val;
        val++;
    }
}
2028

    
2029
#define STVE(name, access, swap, element)                       \
2030
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
2031
    {                                                           \
2032
        size_t n_elems = ARRAY_SIZE(r->element);                \
2033
        int adjust = HI_IDX*(n_elems-1);                        \
2034
        int sh = sizeof(r->element[0]) >> 1;                    \
2035
        int index = (addr & 0xf) >> sh;                         \
2036
        if(msr_le) {                                            \
2037
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2038
        } else {                                                        \
2039
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2040
        }                                                               \
2041
    }
2042
#define I(x) (x)
2043
STVE(stvebx, stb, I, u8)
2044
STVE(stvehx, stw, bswap16, u16)
2045
STVE(stvewx, stl, bswap32, u32)
2046
#undef I
2047
#undef LVE
2048

    
2049
/* mtvscr: copy the target-order low word of the source vector into
 * VSCR and propagate the NJ (non-Java) bit into the vector
 * flush-to-zero status.  */
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}
2058

    
2059
/* vaddcuw: per-word carry-out of the unsigned 32-bit addition a + b
 * (1 when the sum would wrap, else 0).  */
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        /* a + b carries out iff b exceeds the headroom ~a
         * (i.e. UINT32_MAX - a).  */
        uint32_t headroom = ~a->u32[i];
        r->u32[i] = (headroom < b->u32[i]) ? 1 : 0;
    }
}
2066

    
2067
/* Element-wise modulo add/subtract (vaddubm, vsubuwm, ...): apply OP
 * to each pair of elements, discarding overflow.  */
#define VARITH_DO(name, op, element)        \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
{                                                                       \
    int i;                                                              \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
        r->element[i] = a->element[i] op b->element[i];                 \
    }                                                                   \
}
#define VARITH(suffix, element)                  \
  VARITH_DO(add##suffix, +, element)             \
  VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH
2083

    
2084
/* Element-wise single-precision FP add/subtract (vaddfp/vsubfp).
 * NaN operands are quietened and propagated via HANDLE_NAN2 instead
 * of going through the soft-float op.  */
#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP
2097

    
2098
/* One element of a saturating add/subtract: compute in the wider
 * OPTYPE, then narrow with the matching cvt* saturating helper.  */
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

/* Element-wise saturating add/subtract (vaddsbs, vsubuws, ...);
 * VSCR[SAT] is set sticky if any element saturated.  */
#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)    \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)       \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)     \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
2136

    
2137
/* Element-wise rounded average (vavgsb, vavgub, ...): compute
 * (a + b + 1) >> 1 in the wider ETYPE so the +1 rounding cannot
 * overflow the element type.  */
#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
2155

    
2156
/* vcfux/vcfsx: convert each (un)signed word to float and scale by
 * 2**-uim via float32_scalbn.  */
#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn (t, -uim, &env->vec_status);       \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF
2168

    
2169
/* Integer vector compares (vcmpequb, vcmpgtsw, ...): each element is
 * set to all-ones when COMPARE holds, else zero.  The "_dot" forms
 * additionally set CR6 bit 3 when all elements matched and bit 1 when
 * none did.  */
#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
            switch (sizeof (a->element[0])) {                           \
            case 4: r->u32[i] = result; break;                          \
            case 2: r->u16[i] = result; break;                          \
            case 1: r->u8[i] = result; break;                           \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP
2204

    
2205
/* FP vector compares (vcmpeqfp, vcmpgefp, vcmpgtfp): a quiet compare
 * yields a relation; an element is all-ones when `rel COMPARE ORDER`
 * holds, and unordered (NaN) always yields zero.  The "_dot" forms
 * set CR6 as in the integer compares.  */
#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            uint32_t result;                                            \
            int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)           \
    VCMPFP_DO(suffix, compare, order, 0)         \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
/* "greater or equal" == "relation is not less" (unordered is already
 * filtered out above).  */
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP
2238

    
2239
/* vcmpbfp core: per element, set bit 31 when a > b ("out of upper
 * bound") and bit 30 when a < -b ("out of lower bound"); NaN operands
 * set both bits.  When RECORD is set, CR6 bit 1 records that every
 * non-NaN element was within bounds.  */
static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                                    int record)
{
    int i;
    int all_in = 0;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here.  */
        } else {
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;
            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}
2262

    
2263
/* vcmpbfp: bounds compare without CR6 update.  */
void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 0);
}
2267

    
2268
/* vcmpbfp.: bounds compare, recording the result into CR6.  */
void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 1);
}
2272

    
2273
/* vctuxs/vctsxs: scale each float by 2**uim, convert to integer with
 * round-toward-zero (via float64 to keep precision), and narrow with
 * the saturating SATCVT helper.  NaN inputs produce 0; saturation
 * sets VSCR[SAT].  */
#define VCT(suffix, satcvt, element)                                    \
    void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(float_round_to_zero, &s);               \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            if (float32_is_any_nan(b->f[i])) {                          \
                r->element[i] = 0;                                      \
            } else {                                                    \
                float64 t = float32_to_float64(b->f[i], &s);            \
                int64_t j;                                              \
                t = float64_scalbn(t, uim, &s);                         \
                j = float64_to_int64(t, &s);                            \
                r->element[i] = satcvt(j, &sat);                        \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
2298

    
2299
/* vmaddfp: fused-style multiply-add (a * c) + b per float element.
 * NaN operands are quietened and propagated by HANDLE_NAN3.  */
void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end.  */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_add(t, bf, &env->vec_status);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
2316

    
2317
/* vmhaddshs: per signed halfword, ((a * b) >> 15) + c, saturated to
 * 16 bits; VSCR[SAT] is set sticky on saturation.  */
void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2332

    
2333
/* vmhraddshs: like vmhaddshs but with rounding — 0x4000 is added to
 * the 32-bit product before the >> 15 shift.  */
void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2348

    
2349
/* Element-wise integer min/max (vminsb, vmaxuw, ...): keep whichever
 * operand the COMPARE rejects, i.e. min uses ">" and max uses "<" so
 * that b is chosen when a compares past it.  */
#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
  VMINMAX_DO(min##suffix, >, element)           \
  VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX
2372

    
2373
/* vminfp/vmaxfp: per element, select rT when a < b (quiet compare),
 * else rF; minfp instantiates (rT, rF) = (a, b) and maxfp the
 * reverse.  NaN operands are quietened/propagated by HANDLE_NAN2.  */
#define VMINMAXFP(suffix, rT, rF)                                       \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
                    r->f[i] = rT->f[i];                                 \
                } else {                                                \
                    r->f[i] = rF->f[i];                                 \
                }                                                       \
            }                                                           \
        }                                                               \
    }
VMINMAXFP(minfp, a, b)
VMINMAXFP(maxfp, b, a)
#undef VMINMAXFP
2390

    
2391
/* vmladduhm: modulo multiply-low-add — per halfword, keep the low 16
 * bits of a * b + c.  */
void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t product = a->s16[i] * b->s16[i];
        int32_t summed = product + c->s16[i];
        r->s16[i] = (int16_t)summed;
    }
}
2399

    
2400
/* Merge-high/merge-low (vmrghb, vmrglw, ...): interleave the elements
 * of a and b.  HIGHP selects which half of the sources feeds the
 * result; the MRGHI/MRGLO values below are cross-wired per host
 * endianness so the instantiation order stays correct.  */
#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        for (i = 0; i < n_elems/2; i++) {                               \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
  VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO
2434

    
2435
/* vmsummbm: multiply-sum — signed byte times unsigned byte, then each
 * word of the result is c plus the four corresponding products.  */
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}
2448

    
2449
/* vmsumshm: multiply-sum, modulo — each word of the result is c plus
 * the two signed halfword products beneath it.  */
void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
    }
}
2462

    
2463
/* vmsumshs: like vmsumshm but the per-word sums are computed in 64
 * bits and saturated to int32; VSCR[SAT] is set on saturation.  */
void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2482

    
2483
/* vmsumubm: unsigned byte multiply-sum — each word of the result is c
 * plus the four unsigned byte products beneath it.  */
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}
2496

    
2497
/* vmsumuhm: unsigned halfword multiply-sum, modulo — each word of the
 * result is c plus the two unsigned halfword products beneath it.  */
void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
    }
}
2510

    
2511
/* vmsumuhs: like vmsumuhm but the per-word sums are computed in 64
 * bits and saturated to uint32; VSCR[SAT] is set on saturation.  */
void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2530

    
2531
/* Even/odd element multiplies (vmulesb, vmuloub, ...): multiply the
 * even (EVENP) or odd elements of a and b into the double-width
 * product elements; HI_IDX/LO_IDX map even/odd to host order.  */
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element) \
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
2552

    
2553
/* vnmsubfp: negative multiply-subtract, -((a * c) - b) per float
 * element.  NaN operands are quietened and propagated by
 * HANDLE_NAN3.  */
void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end.  */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_sub(t, bf, &env->vec_status);
            t = float64_chs(t);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
2571

    
2572
/* vperm: byte permute.  Each byte of c selects one byte out of the
 * 32-byte concatenation a||b (bit 0x10 chooses b, low nibble is the
 * byte index); a local result is built first so r may alias a/b/c. */
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;
    VECTOR_FOR_INORDER_I (i, u8) {
        int s = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int index = s & 0xf;
#else
        /* Host storage order is reversed, so mirror the byte index. */
        int index = 15 - (s & 0xf);
#endif
        if (s & 0x10) {
            result.u8[i] = b->u8[index];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
    *r = result;
}
2591

    
2592
/* PKBIG: 1 when the host stores vector elements big-endian (source
 * operand order matches element order), 0 otherwise. */
#if defined(HOST_WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
/* vpkpx: pack the eight 32-bit pixels of a||b into eight 16-bit pixels,
 * keeping the upper bits of each channel (inverse of VUPKPX below). */
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(HOST_WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I (i, u64) {
        VECTOR_FOR_INORDER_I (j, u32){
            uint32_t e = x[i]->u32[j];
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}
2617

    
2618
/* VPK: pack two source vectors of `from` elements into one vector of
 * `to` elements through conversion cvt; when dosat is set and cvt
 * reported saturation, VSCR[SAT] is raised.  a0/a1 order the operands
 * so packing matches guest element order regardless of host endianness. */
#define VPK(suffix, from, to, cvt, dosat)       \
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
        VECTOR_FOR_INORDER_I (i, from) {                                \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
/* Identity "conversion" for the modulo (non-saturating) variants. */
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG
2647

    
2648
/* vrefp: per-element reciprocal "estimate", implemented here as an
 * exact 1.0/x division; NaN inputs are filtered by HANDLE_NAN1. */
void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
{
    const int nelem = ARRAY_SIZE(r->f);
    int idx;

    for (idx = 0; idx < nelem; idx++) {
        HANDLE_NAN1(r->f[idx], b->f[idx]) {
            r->f[idx] = float32_div(float32_one, b->f[idx], &env->vec_status);
        }
    }
}
2657

    
2658
/* VRFI: round each element to an integral float value using the given
 * rounding mode.  The mode is set on a local copy of vec_status so the
 * vector unit's current rounding mode is left untouched. */
#define VRFI(suffix, rounding)                                          \
    void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(rounding, &s);                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN1(r->f[i], b->f[i]) {                             \
                r->f[i] = float32_round_to_int (b->f[i], &s);           \
            }                                                           \
        }                                                               \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI
2675

    
2676
#define VROTATE(suffix, element)                                        \
2677
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2678
    {                                                                   \
2679
        int i;                                                          \
2680
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2681
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2682
            unsigned int shift = b->element[i] & mask;                  \
2683
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2684
        }                                                               \
2685
    }
2686
VROTATE(b, u8)
2687
VROTATE(h, u16)
2688
VROTATE(w, u32)
2689
#undef VROTATE
2690

    
2691
/* vrsqrtefp: per-element reciprocal-square-root "estimate", computed
 * exactly as 1.0/sqrt(x); NaN inputs are filtered by HANDLE_NAN1. */
void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            float32 t = float32_sqrt(b->f[i], &env->vec_status);
            r->f[i] = float32_div(float32_one, t, &env->vec_status);
        }
    }
}
2701

    
2702
/* vsel: bitwise select — each result bit comes from b where the
 * corresponding bit of c is 1, and from a where it is 0. */
void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint64_t hi = (b->u64[0] & c->u64[0]) | (a->u64[0] & ~c->u64[0]);
    uint64_t lo = (b->u64[1] & c->u64[1]) | (a->u64[1] & ~c->u64[1]);

    r->u64[0] = hi;
    r->u64[1] = lo;
}
2707

    
2708
/* vexptefp: per-element 2**x "estimate" (computed via float32_exp2);
 * NaN inputs are filtered by HANDLE_NAN1. */
void helper_vexptefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_exp2(b->f[i], &env->vec_status);
        }
    }
}
2717

    
2718
/* vlogefp: per-element log2(x) "estimate" (computed via float32_log2);
 * NaN inputs are filtered by HANDLE_NAN1. */
void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_log2(b->f[i], &env->vec_status);
        }
    }
}
2727

    
2728
/* LEFT/RIGHT select which 128-bit shift direction each VSHIFT instance
 * implements, accounting for host element storage order. */
#if defined(HOST_WORDS_BIGENDIAN)
#define LEFT 0
#define RIGHT 1
#else
#define LEFT 1
#define RIGHT 0
#endif
/* The specification says that the results are undefined if all of the
 * shift counts are not identical.  We check to make sure that they are
 * to conform to what real hardware appears to do.  */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
        int doit = 1;                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        /* r is left unmodified when the shift counts disagree. */      \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                *r = *a;                                                \
            } else if (leftp) {                                         \
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
            }                                                           \
        }                                                               \
    }
VSHIFT(l, LEFT)
VSHIFT(r, RIGHT)
#undef VSHIFT
#undef LEFT
#undef RIGHT
2766

    
2767
/* VSL: shift each element left by the count held in the low
 * log2(element width) bits of the matching element of b. */
#define VSL(suffix, element)                                            \
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL
2781

    
2782
/* vsldoi: shift the 32-byte concatenation a||b left by sh (0..15)
 * bytes and take the high 16 bytes.  The little-endian branch mirrors
 * the indexing because elements are stored in reversed byte order. */
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

#if defined(HOST_WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index-0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index-0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    *r = result;
}
2809

    
2810
/* vslo: shift the whole vector left by sh octets (bits 121:124 of b),
 * filling vacated bytes with zero.  memmove is used since r may
 * alias a. */
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#else
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#endif
}
2822

    
2823
/* Experimental testing shows that hardware masks the immediate.  */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
/* SPLAT_ELEMENT maps the guest element number to the host storage
 * index (mirrored on little-endian hosts). */
#if defined(HOST_WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
#endif
/* VSPLT: replicate the selected element of b into every element of r. */
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED
2845

    
2846
/* VSPLTI: splat a 5-bit signed immediate into every element.  The
 * (int8_t)(splat << 3) >> 3 dance sign-extends bit 4 of the immediate. */
#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI
2859

    
2860
/* VSR: shift each element right by the count held in the low
 * log2(element width) bits of the matching element of b.  The signed
 * instantiations (s8/s16/s32) give the arithmetic-shift variants. */
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR
2877

    
2878
/* vsro: shift the whole vector right by sh octets (bits 121:124 of b),
 * filling vacated bytes with zero.  memmove is used since r may
 * alias a. */
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#else
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#endif
}
2890

    
2891
/* vsubcuw: per-word carry-out of the unsigned subtraction a - b:
 * 1 when no borrow occurs (a >= b), 0 otherwise. */
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int w;

    for (w = 0; w < ARRAY_SIZE(r->u32); w++) {
        r->u32[w] = (a->u32[w] >= b->u32[w]) ? 1 : 0;
    }
}
2898

    
2899
/* vsumsws: sum all four signed words of a plus the last (in guest
 * element order) word of b in 64-bit precision, saturate to 32 bits
 * into the last word of r and clear the other words.  Sets VSCR[SAT]
 * when cvtsdsw reports saturation. */
void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2923

    
2924
/* vsum2sws: for each doubleword, sum its two signed words of a plus one
 * word of b in 64-bit precision, saturate into that word of r and clear
 * the doubleword's other word.  Sets VSCR[SAT] on saturation.
 * NOTE(review): the inner loop bound ARRAY_SIZE(r->u64) (== 2) happens
 * to equal the number of words summed per doubleword — it is not a
 * typo, but the coincidence is worth knowing before editing. */
void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper+i*2];
        result.u64[i] = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2*i+j];
        }
        result.s32[upper+i*2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2949

    
2950
/* vsum4sbs: for each word, sum its four signed bytes of a plus the
 * matching signed word of b, saturating to 32 bits.  Sets VSCR[SAT]
 * on saturation.  NOTE(review): the inner bound ARRAY_SIZE(r->s32)
 * (== 4) coincidentally equals the number of bytes per word. */
void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4*i+j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2967

    
2968
/* vsum4shs: for each word, add its two signed halfwords of a to the
 * matching signed word of b, saturating the 64-bit total to 32 bits.
 * Sets VSCR[SAT] when cvtsdsw reports saturation. */
void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int w;

    for (w = 0; w < ARRAY_SIZE(r->s32); w++) {
        int64_t acc = (int64_t)b->s32[w];
        acc += a->s16[2*w];
        acc += a->s16[2*w+1];
        r->s32[w] = cvtsdsw(acc, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2983

    
2984
/* vsum4ubs: for each word, sum its four unsigned bytes of a plus the
 * matching unsigned word of b, saturating to 32 bits via cvtuduw.
 * Sets VSCR[SAT] on saturation.  NOTE(review): as with vsum4sbs, the
 * inner bound ARRAY_SIZE(r->u32) (== 4) equals bytes-per-word only by
 * coincidence. */
void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4*i+j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
3001

    
3002
/* UPKHI/UPKLO select which half of the source an unpack helper reads,
 * adjusted for host element storage order. */
#if defined(HOST_WORDS_BIGENDIAN)
#define UPKHI 1
#define UPKLO 0
#else
#define UPKHI 0
#define UPKLO 1
#endif
/* VUPKPX: unpack 16-bit 1:5:5:5 pixels into 32-bit 0xAARRGGBB pixels.
 * NOTE(review): the locals a/r/g/b inside the loop deliberately shadow
 * the function parameters; the final "*r = result" sits outside that
 * scope and refers to the parameter again. */
#define VUPKPX(suffix, hi)                                      \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
    {                                                           \
        int i;                                                  \
        ppc_avr_t result;                                       \
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
            uint16_t e = b->u16[hi ? i : i+4];                  \
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
            uint8_t r = (e >> 10) & 0x1f;                       \
            uint8_t g = (e >> 5) & 0x1f;                        \
            uint8_t b = e & 0x1f;                               \
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
        }                                                               \
        *r = result;                                                    \
    }
VUPKPX(lpx, UPKLO)
VUPKPX(hpx, UPKHI)
#undef VUPKPX
3027

    
3028
/* VUPK: sign-extend the high (hi != 0) or low half of b's packee
 * elements into the wider unpacked elements of r.  A local result is
 * built first so r may alias b. */
#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO

/* End of the AltiVec helper section: drop its private macros. */
#undef DO_HANDLE_NAN
#undef HANDLE_NAN1
#undef HANDLE_NAN2
#undef HANDLE_NAN3
#undef VECTOR_FOR_INORDER_I
#undef HI_IDX
#undef LO_IDX

    
3060
/*****************************************************************************/
3061
/* SPE extension helpers */
3062
/* Table-driven bit reversal: hbrev[n] is the 4-bit value n with its
 * bits mirrored. */
static uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

/* Mirror the 8 bits of val (bit 0 <-> bit 7, bit 1 <-> bit 6, ...). */
static inline uint8_t byte_reverse(uint8_t val)
{
    uint8_t hi_nibble = hbrev[val >> 4];
    uint8_t lo_nibble = hbrev[val & 0xF];

    return hi_nibble | (lo_nibble << 4);
}

/* Mirror the 32 bits of val: reverse each byte, then swap byte order. */
static inline uint32_t word_reverse(uint32_t val)
{
    uint32_t b3 = byte_reverse(val >> 24);
    uint32_t b2 = byte_reverse(val >> 16);
    uint32_t b1 = byte_reverse(val >> 8);
    uint32_t b0 = byte_reverse(val);

    return b3 | (b2 << 8) | (b1 << 16) | (b0 << 24);
}
3078

    
3079
#define MASKBITS 16 // Random value - to be fixed (implementation dependant)
3080
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
3081
{
3082
    uint32_t a, b, d, mask;
3083

    
3084
    mask = UINT32_MAX >> (32 - MASKBITS);
3085
    a = arg1 & mask;
3086
    b = arg2 & mask;
3087
    d = word_reverse(1 + word_reverse(a | ~b));
3088
    return (arg1 & ~mask) | (d & b);
3089
}
3090

    
3091
/* Count the leading bits of val that are equal to its sign bit. */
uint32_t helper_cntlsw32 (uint32_t val)
{
    return clz32((val & 0x80000000) ? ~val : val);
}
3098

    
3099
/* Count leading zero bits of a 32-bit value. */
uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}
3103

    
3104
/* Single-precision floating-point conversions */
/* All SPE single-precision conversions use the vector FP status
 * (env->vec_status).  The float->integer variants return 0 for NaN
 * inputs instead of following the IEEE 754 invalid-operation path. */

/* Signed 32-bit integer -> float32. */
static inline uint32_t efscfsi(uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

/* Unsigned 32-bit integer -> float32. */
static inline uint32_t efscfui(uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

/* float32 -> signed 32-bit integer, current rounding mode. */
static inline int32_t efsctsi(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_int32(u.f, &env->vec_status);
}

/* float32 -> unsigned 32-bit integer, current rounding mode. */
static inline uint32_t efsctui(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_uint32(u.f, &env->vec_status);
}

/* float32 -> signed 32-bit integer, rounding toward zero. */
static inline uint32_t efsctsiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

/* float32 -> unsigned 32-bit integer, rounding toward zero. */
static inline uint32_t efsctuiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}
3170

    
3171
/* Fractional conversions: a 32-bit fixed-point fraction maps to a
 * float by scaling with 2**32 (the 1ULL << 32 constant below). */

/* Signed fractional -> float32 (divide by 2**32). */
static inline uint32_t efscfsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* Unsigned fractional -> float32 (divide by 2**32). */
static inline uint32_t efscfuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* float32 -> signed fractional (multiply by 2**32); NaN yields 0. */
static inline uint32_t efsctsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

/* float32 -> unsigned fractional (multiply by 2**32); NaN yields 0. */
static inline uint32_t efsctuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
3224

    
3225
/* Expose each scalar SPE conversion above as a TCG helper helper_e<name>. */
#define HELPER_SPE_SINGLE_CONV(name)                                          \
uint32_t helper_e##name (uint32_t val)                                        \
{                                                                             \
    return e##name(val);                                                      \
}
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);

    
3251
/* Vector (two-element) SPE conversions: apply the scalar conversion to
 * the high and low 32-bit halves of a 64-bit GPR pair independently. */
#define HELPER_SPE_VECTOR_CONV(name)                                          \
uint64_t helper_ev##name (uint64_t val)                                       \
{                                                                             \
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
            (uint64_t)e##name(val);                                           \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);

    
3278
/* Single-precision floating-point arithmetic */
3279
static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
3280
{
3281
    CPU_FloatU u1, u2;
3282
    u1.l = op1;
3283
    u2.l = op2;
3284
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3285
    return u1.l;
3286
}
3287

    
3288
static inline uint32_t efssub(uint32_t op1, uint32_t op2)
3289
{
3290
    CPU_FloatU u1, u2;
3291
    u1.l = op1;
3292
    u2.l = op2;
3293
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3294
    return u1.l;
3295
}
3296

    
3297
static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
3298
{
3299
    CPU_FloatU u1, u2;
3300
    u1.l = op1;
3301
    u2.l = op2;
3302
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3303
    return u1.l;
3304
}
3305

    
3306
static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
3307
{
3308
    CPU_FloatU u1, u2;
3309
    u1.l = op1;
3310
    u2.l = op2;
3311
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3312
    return u1.l;
3313
}
3314

    
3315
/* Expose each scalar SPE arithmetic op above as a TCG helper. */
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

    
3329
/* Vector SPE arithmetic: apply the scalar op independently to the high
 * and low 32-bit halves of each 64-bit operand. */
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
            (uint64_t)e##name(op1, op2);                                      \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);

    
3344
/* Single-precision floating-point comparisons */
/* Each returns 4 ("true") or 0 ("false").
 * NOTE(review): efscmpgt is implemented as !(op1 <= op2), so unordered
 * (NaN) operands report "greater than"; confirm against the SPE PEM
 * before changing to float32_lt(op2, op1). */
static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

/* The efstst* "test" variants are currently aliases of the signalling
 * compares; special values are not yet handled separately. */
static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmplt(op1, op2);
}

static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpgt(op1, op2);
}

static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpeq(op1, op2);
}

    
3387
/* Scalar SPE compare helpers.
 * NOTE(review): e##name already returns 0 or 4, so after the << 2 the
 * helper yields 0 or 0x10; presumably translate.c extracts the CR bit
 * accordingly — confirm before "simplifying" the shift away. */
#define HELPER_SINGLE_SPE_CMP(name)                                           \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2) << 2;                                            \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);

    
3405
/* Merge two per-lane comparison results into a 4-bit CR-style field:
 * bit 3 = high lane, bit 2 = low lane, bit 1 = either, bit 0 = both. */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    int any = t0 | t1;
    int all = t0 & t1;

    return (t0 << 3) | (t1 << 2) | (any << 1) | all;
}
3409

    
3410
/* Emit the vector SPE compare helper helper_ev<name>(): run the scalar
 * primitive on the high and low 32-bit halves of each 64-bit operand and
 * merge the two results into one CR field.
 * NOTE(review): evcmp_merge() expects 0/1 element results, but the
 * e<name>() primitives return 0/4 -- confirm the intended scaling against
 * the CR consumer in translate.c. */
#define HELPER_VECTOR_SPE_CMP(name)                                           \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
3427

    
3428
/* Double-precision floating-point conversion */
3429
/* efdcfsi: convert a signed 32-bit integer to a double-precision float,
 * returned as its raw 64-bit representation. */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU res;

    res.d = int32_to_float64(val, &env->vec_status);
    return res.ll;
}
3437

    
3438
/* efdcfsid: convert a signed 64-bit integer to a double-precision float,
 * returned as its raw 64-bit representation. */
uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU res;

    res.d = int64_to_float64(val, &env->vec_status);
    return res.ll;
}
3446

    
3447
/* efdcfui: convert an unsigned 32-bit integer to a double-precision float,
 * returned as its raw 64-bit representation. */
uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU res;

    res.d = uint32_to_float64(val, &env->vec_status);
    return res.ll;
}
3455

    
3456
/* efdcfuid: convert an unsigned 64-bit integer to a double-precision float,
 * returned as its raw 64-bit representation. */
uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU res;

    res.d = uint64_to_float64(val, &env->vec_status);
    return res.ll;
}
3464

    
3465
/* efdctsi: convert a double to a signed 32-bit integer using the current
 * rounding mode.  SPE deviation from IEEE 754: NaN inputs convert to 0. */
uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_int32(in.d, &env->vec_status);
}
3477

    
3478
/* efdctui: convert a double to an unsigned 32-bit integer using the current
 * rounding mode.  SPE deviation from IEEE 754: NaN inputs convert to 0. */
uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_uint32(in.d, &env->vec_status);
}
3490

    
3491
/* efdctsiz: convert a double to a signed 32-bit integer, rounding toward
 * zero.  SPE deviation from IEEE 754: NaN inputs convert to 0. */
uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_int32_round_to_zero(in.d, &env->vec_status);
}
3503

    
3504
/* efdctsidz: convert a double to a signed 64-bit integer, rounding toward
 * zero.  SPE deviation from IEEE 754: NaN inputs convert to 0. */
uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_int64_round_to_zero(in.d, &env->vec_status);
}
3516

    
3517
/* efdctuiz: convert a double to an unsigned 32-bit integer, rounding toward
 * zero.  SPE deviation from IEEE 754: NaN inputs convert to 0. */
uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_uint32_round_to_zero(in.d, &env->vec_status);
}
3529

    
3530
/* efdctuidz: convert a double to an unsigned 64-bit integer, rounding toward
 * zero.  SPE deviation from IEEE 754: NaN inputs convert to 0. */
uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU in;

    in.ll = val;
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    return float64_to_uint64_round_to_zero(in.d, &env->vec_status);
}
3542

    
3543
/* efdcfsf: convert a signed 32-bit fractional value to a double by
 * converting the raw integer and dividing by 2^32. */
uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU res;
    float64 two32;

    res.d = int32_to_float64(val, &env->vec_status);
    two32 = int64_to_float64(1ULL << 32, &env->vec_status);
    res.d = float64_div(res.d, two32, &env->vec_status);
    return res.ll;
}
3554

    
3555
/* efdcfuf: convert an unsigned 32-bit fractional value to a double by
 * converting the raw integer and dividing by 2^32. */
uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU res;
    float64 two32;

    res.d = uint32_to_float64(val, &env->vec_status);
    two32 = int64_to_float64(1ULL << 32, &env->vec_status);
    res.d = float64_div(res.d, two32, &env->vec_status);
    return res.ll;
}
3566

    
3567
/* efdctsf: convert a double to a signed 32-bit fractional value by scaling
 * by 2^32 before the integer conversion.  SPE deviation from IEEE 754:
 * NaN inputs convert to 0. */
uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU in;
    float64 two32;

    in.ll = val;
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    two32 = uint64_to_float64(1ULL << 32, &env->vec_status);
    in.d = float64_mul(in.d, two32, &env->vec_status);
    return float64_to_int32(in.d, &env->vec_status);
}
3582

    
3583
/* efdctuf: convert a double to an unsigned 32-bit fractional value by
 * scaling by 2^32 before the integer conversion.  SPE deviation from
 * IEEE 754: NaN inputs convert to 0. */
uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU in;
    float64 two32;

    in.ll = val;
    if (unlikely(float64_is_any_nan(in.d))) {
        return 0;
    }
    two32 = uint64_to_float64(1ULL << 32, &env->vec_status);
    in.d = float64_mul(in.d, two32, &env->vec_status);
    return float64_to_uint32(in.d, &env->vec_status);
}
3598

    
3599
/* efscfd: narrow a raw double-precision value to single precision. */
uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU in;
    CPU_FloatU out;

    in.ll = val;
    out.f = float64_to_float32(in.d, &env->vec_status);
    return out.l;
}
3609

    
3610
/* efdcfs: widen a raw single-precision value to double precision. */
uint64_t helper_efdcfs (uint32_t val)
{
    CPU_FloatU in;
    CPU_DoubleU out;

    in.l = val;
    out.d = float32_to_float64(in.f, &env->vec_status);
    return out.ll;
}
3620

    
3621
/* Double precision fixed-point arithmetic */
3622
/* efdadd: double-precision addition on raw 64-bit operands. */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_add(a.d, b.d, &env->vec_status);
    return a.ll;
}
3630

    
3631
/* efdsub: double-precision subtraction (op1 - op2) on raw operands. */
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_sub(a.d, b.d, &env->vec_status);
    return a.ll;
}
3639

    
3640
/* efdmul: double-precision multiplication on raw operands. */
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_mul(a.d, b.d, &env->vec_status);
    return a.ll;
}
3648

    
3649
/* efddiv: double-precision division (op1 / op2) on raw operands. */
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    a.d = float64_div(a.d, b.d, &env->vec_status);
    return a.ll;
}
3657

    
3658
/* Double precision floating point helpers */
3659
/* efdtstlt: double-precision "less than" test; returns the CR bit value
 * 4 when op1 < op2, else 0. */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    return float64_lt(a.d, b.d, &env->vec_status) ? 4 : 0;
}
3666

    
3667
/* efdtstgt: double-precision "greater than" test; 4 when op1 > op2, else 0.
 * Implemented as the negation of "less than or equal", so unordered (NaN)
 * operands yield "greater than". */
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    return float64_le(a.d, b.d, &env->vec_status) ? 0 : 4;
}
3674

    
3675
/* efdtsteq: double-precision "equal" test (quiet compare, does not signal
 * on NaN); 4 when op1 == op2, else 0. */
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU a, b;

    a.ll = op1;
    b.ll = op2;
    return float64_eq_quiet(a.d, b.d, &env->vec_status) ? 4 : 0;
}
3682

    
3683
/* efdcmplt: full double-precision "less than" compare.
 * TODO: special values (NaN, infinities) are not yet handled differently
 * from the non-trapping "tst" variant. */
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    return helper_efdtstlt(op1, op2);
}
3688

    
3689
/* efdcmpgt: full double-precision "greater than" compare.
 * TODO: special values (NaN, infinities) are not yet handled differently
 * from the non-trapping "tst" variant. */
uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    return helper_efdtstgt(op1, op2);
}
3694

    
3695
/* efdcmpeq: full double-precision "equal" compare.
 * TODO: special values (NaN, infinities) are not yet handled differently
 * from the non-trapping "tst" variant. */
uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    return helper_efdtsteq(op1, op2);
}
3700

    
3701
/*****************************************************************************/
3702
/* Softmmu support */
3703
#if !defined (CONFIG_USER_ONLY)
3704

    
3705
#define MMUSUFFIX _mmu
3706

    
3707
#define SHIFT 0
3708
#include "softmmu_template.h"
3709

    
3710
#define SHIFT 1
3711
#include "softmmu_template.h"
3712

    
3713
#define SHIFT 2
3714
#include "softmmu_template.h"
3715

    
3716
#define SHIFT 3
3717
#include "softmmu_template.h"
3718

    
3719
/* try to fill the TLB and return an exception if error. If retaddr is
3720
   NULL, it means that the function was called in C code (i.e. not
3721
   from generated code or from helper.c) */
3722
/* XXX: fix it to restore all registers */
3723
/* Try to fill the softmmu TLB for 'addr'; on failure raise the MMU
 * exception set up by cpu_ppc_handle_mmu_fault() (which never returns).
 * If retaddr is NULL, it means that the function was called in C code
 * (i.e. not from generated code or from helper.c); otherwise retaddr is a
 * host PC inside translated code and the CPU state is rolled back to the
 * faulting guest instruction before the exception is raised. */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        /* Does not return: longjmps back to the CPU main loop. */
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
3750

    
3751
/* Segment registers load and store */
3752
/* mfsr/mfsrin: read segment register sr_num.  64-bit MMU models emulate
 * segment registers via the SLB, so delegate to ppc_load_sr() there. */
target_ulong helper_load_sr (target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        return ppc_load_sr(env, sr_num);
    }
#endif
    return env->sr[sr_num];
}
3760

    
3761
/* mtsr/mtsrin: write segment register sr_num; all the work (including any
 * TLB invalidation) is done by the MMU code. */
void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}
3765

    
3766
/* SLB management */
3767
#if defined(TARGET_PPC64)
3768
/* slbmte: install an SLB entry from rb/rs; an invalid operand combination
 * raises a program-check (illegal instruction) exception. */
void helper_store_slb (target_ulong rb, target_ulong rs)
{
    int rc = ppc_store_slb(env, rb, rs);

    if (rc < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
}
3774

    
3775
/* slbmfee: read the ESID half of the SLB entry selected by rb; an invalid
 * index raises a program-check exception. */
target_ulong helper_load_slb_esid (target_ulong rb)
{
    target_ulong esid;

    if (ppc_load_slb_esid(env, rb, &esid) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
    return esid;
}
3784

    
3785
/* slbmfev: read the VSID half of the SLB entry selected by rb; an invalid
 * index raises a program-check exception. */
target_ulong helper_load_slb_vsid (target_ulong rb)
{
    target_ulong vsid;

    if (ppc_load_slb_vsid(env, rb, &vsid) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
    return vsid;
}
3794

    
3795
/* slbia: invalidate all SLB entries. */
void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}
3799

    
3800
/* slbie: invalidate the SLB entry covering effective address 'addr'. */
void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
3804

    
3805
#endif /* defined(TARGET_PPC64) */
3806

    
3807
/* TLB management */
3808
/* tlbia: invalidate the whole TLB. */
void helper_tlbia (void)
{
    ppc_tlb_invalidate_all(env);
}
3812

    
3813
/* tlbie: invalidate the TLB entry covering effective address 'addr'. */
void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
3817

    
3818
/* Software driven TLBs management */
3819
/* PowerPC 602/603 software TLB load instructions helpers */
3820
/* Load one entry of the 602/603 software-managed TLB.  The PTE words come
 * from the RPA and I/DCMP SPRs (filled in by the TLB-miss exception
 * handler), the way to replace comes from SRR1 bit 17, and new_EPN selects
 * the page being mapped.  is_code selects the instruction vs data TLB. */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3842

    
3843
/* tlbld: load a data TLB entry (602/603). */
void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}
3847

    
3848
/* tlbli: load an instruction TLB entry (602/603). */
void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}
3852

    
3853
/* PowerPC 74xx software TLB load instructions helpers */
3854
/* Load one entry of the 74xx software-managed TLB.  The PTE words come
 * from the PTELO/PTEHI SPRs; the faulting address and replacement way are
 * packed in the TLBMISS SPR (way in the low 2 bits). */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3871

    
3872
/* tlbld: load a data TLB entry (74xx). */
void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}
3876

    
3877
/* tlbli: load an instruction TLB entry (74xx). */
void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}
3881

    
3882
static inline target_ulong booke_tlb_to_page_size(int size)
3883
{
3884
    return 1024 << (2 * size);
3885
}
3886

    
3887
/* Inverse of booke_tlb_to_page_size(): map a page size in bytes back to
 * the BookE TSIZE code (1KB -> 0, each step multiplies the size by 4).
 * Sizes above 1GB only exist when target_ulong is 64 bits wide.
 * Returns -1 when the size is not an exactly representable TSIZE. */
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}
3949

    
3950
/* Helpers for 4xx TLB management */
3951
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

/* TLBHI word: valid bit, little-endian bit and the 3-bit SIZE field. */
#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

/* TLBLO word: execute/write permissions, storage attributes and RPN. */
#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00
3965

    
3966
/* tlbre (TLBHI word): reconstruct the high word of a 4xx TLB entry from the
 * internal ppcemb_tlb_t representation.  Side effect per the 4xx spec: the
 * entry's PID is written back to the PID SPR. */
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    /* Clamp unrepresentable sizes to the default 4KB code. */
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}
3986

    
3987
/* tlbre (TLBLO word): reconstruct the low word of a 4xx TLB entry (RPN plus
 * execute/write permission bits) from the internal representation. */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb = &env->tlb[entry & PPC4XX_TLB_ENTRY_MASK].tlbe;
    target_ulong word = tlb->RPN;

    if (tlb->prot & PAGE_EXEC) {
        word |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        word |= PPC4XX_TLBLO_WR;
    }
    return word;
}
4003

    
4004
/* tlbwe (TLBHI word): write the high word of a 4xx TLB entry.  The old
 * mapping (if valid) is flushed from the QEMU TLB page by page, the new
 * EPN/size/valid state is installed, and the new mapping's pages are
 * flushed as well so stale translations cannot survive. */
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    /* Align the EPN down to the page size of the entry. */
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
}
4062

    
4063
/* tlbwe (TLBLO word): write the low word of a 4xx TLB entry -- storage
 * attributes, RPN, and the protection bits.  PAGE_READ is always granted;
 * execute and write are taken from the EX/WR bits of 'val'. */
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
4088

    
4089
/* tlbsx: search the TLB for 'address' under the current PID; returns the
 * matching entry index (see ppcemb_tlb_search for the miss encoding). */
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
4093

    
4094
/* PowerPC 440 TLB management */
4095
/* 440 tlbwe: write one of the three words of a 440 TLB entry.
 * word 0 = EPN/size/valid/attr bit 0, word 1 = RPN, word 2 = storage
 * attributes and the user/supervisor permission bits (supervisor bits are
 * kept shifted left by 4 in tlb->prot).  Any change that can retire an
 * existing valid translation triggers a full QEMU TLB flush. */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        /* Keep only the valid bit; permissions are rebuilt below. */
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}
4156

    
4157
/* 440 tlbre: read back one of the three words of a 440 TLB entry (inverse
 * of helper_440_tlbwe).  Reading word 0 also writes the entry's PID into
 * the low byte of MMUCR, per the 440 spec. */
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        /* Unrepresentable sizes read back as the 4KB code. */
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}
4202

    
4203
/* 440 tlbsx: search the TLB for 'address' under the PID held in the low
 * byte of MMUCR. */
target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
4207

    
4208
/* PowerPC BookE 2.06 TLB management */
4209

    
4210
/* Resolve the TLB entry currently selected by the MAS registers:
 * MAS0 supplies the TLB array (TLBSEL) and entry/way (ESEL), MAS2 the
 * effective page number used for set-associative indexing. */
static ppcemb_tlb_t *booke206_cur_tlb(CPUState *env)
{
    uint32_t tlbncfg = 0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    /* Hardware entry selection is not implemented. */
    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env, "we don't support HES yet\n");
    }

    return booke206_get_tlbe(env, tlb, ea, esel);
}
4226

    
4227
static inline target_phys_addr_t booke206_tlb_to_page_size(int size)
4228
{
4229
    return (1 << (size << 1)) << 10;
4230
}
4231

    
4232
/* Inverse of booke206_tlb_to_page_size(): bytes -> TSIZE code, computed as
 * half the bit position of the size expressed in KB.
 * NOTE(review): ffs() takes an int, so sizes whose KB count exceeds
 * INT_MAX would be truncated -- confirm callers never pass pages larger
 * than the 1TB TSIZE 0xF encoding. */
static inline target_phys_addr_t booke206_page_size_to_tlb(uint64_t size)
{
    return (ffs(size >> 10) - 1) >> 1;
}
4236

    
4237
/* Write a BookE PID register.  A PID change switches address spaces, so
 * every cached translation must be dropped. */
void helper_booke_setpid(uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    tlb_flush(env, 1);
}
4243

    
4244
/* BookE 2.06 tlbwe: write the TLB entry selected by MAS0/MAS2 using the
 * contents of the MAS registers (RPN from MAS7:MAS3, PID/TSIZE from MAS1,
 * EPN and WIMGE attributes from MAS2, permissions from MAS3).  Finishes by
 * flushing the affected page -- or the whole QEMU TLB for large pages. */
void helper_booke206_tlbwe(void)
{
    uint32_t tlbncfg, tlbn;
    ppcemb_tlb_t *tlb;
    target_phys_addr_t rpn;
    int tlbe_size;

    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        if (0) {
            return;
        }
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
         !msr_gs) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);

    if (msr_gs) {
        cpu_abort(env, "missing HV implementation\n");
    } else {
        /* Real page number: MAS7 holds the bits above 32. */
        rpn = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
              (env->spr[SPR_BOOKE_MAS3] & 0xfffff000);
    }
    tlb->RPN = rpn;

    tlb->PID = (env->spr[SPR_BOOKE_MAS1] & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
    if (tlbncfg & TLBnCFG_AVAIL) {
        /* This TLB array supports variable page sizes: use MAS1's TSIZE. */
        tlbe_size = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK)
                    >> MAS1_TSIZE_SHIFT;
    } else {
        /* Fixed-size array: the size comes from the TLB config register. */
        tlbe_size = (tlbncfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
    }

    tlb->size = booke206_tlb_to_page_size(tlbe_size);
    tlb->EPN = (uint32_t)(env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    /* WIMGE attributes are kept shifted left by 1 inside tlb->attr
     * (bit 0 is the TS bit, set below).
     * NOTE(review): "&" binds looser than "<<", so this masks MAS2 with
     * the shifted mask instead of shifting the masked value -- the
     * read-back in booke206_tlb_to_mas() does "(attr >> 1) & mask", which
     * does not round-trip with this expression; verify the intended
     * parenthesization. */
    tlb->attr = env->spr[SPR_BOOKE_MAS2] & (MAS2_ACM | MAS2_VLE | MAS2_W |
                                            MAS2_I | MAS2_M | MAS2_G | MAS2_E)
                << 1;

    if (tlbncfg & TLBnCFG_IPROT) {
        tlb->attr |= env->spr[SPR_BOOKE_MAS1] & MAS1_IPROT;
    }
    /* NOTE(review): same precedence concern -- the U0..U3 mask is shifted
     * before the AND, not after. */
    tlb->attr |= (env->spr[SPR_BOOKE_MAS3] &
                  ((MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3)) << 8);
    if (env->spr[SPR_BOOKE_MAS1] & MAS1_TS) {
        tlb->attr |= 1;
    }

    tlb->prot = 0;

    if (env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) {
        tlb->prot |= PAGE_VALID;
    }
    /* Supervisor permissions are stored shifted left by 4. */
    if (env->spr[SPR_BOOKE_MAS3] & MAS3_UX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (env->spr[SPR_BOOKE_MAS3] & MAS3_SX) {
        tlb->prot |= PAGE_EXEC << 4;
    }
    if (env->spr[SPR_BOOKE_MAS3] & MAS3_UW) {
        tlb->prot |= PAGE_WRITE;
    }
    if (env->spr[SPR_BOOKE_MAS3] & MAS3_SW) {
        tlb->prot |= PAGE_WRITE << 4;
    }
    if (env->spr[SPR_BOOKE_MAS3] & MAS3_UR) {
        tlb->prot |= PAGE_READ;
    }
    if (env->spr[SPR_BOOKE_MAS3] & MAS3_SR) {
        tlb->prot |= PAGE_READ << 4;
    }

    if (tlb->size == TARGET_PAGE_SIZE) {
        tlb_flush_page(env, tlb->EPN);
    } else {
        tlb_flush(env, 1);
    }
}
4342

    
4343
/* Load the MAS registers from an internal TLB entry (the read-back half of
 * tlbre/tlbsx): MAS0 gets the TLB array/way and next-victim hint, MAS1 the
 * valid/TS/TID/TSIZE fields, MAS2 the EPN and WIMGE attributes, MAS7:MAS3
 * the RPN and permission bits. */
static inline void booke206_tlb_to_mas(CPUState *env, ppcemb_tlb_t *tlb)
{
    int tlbn = booke206_tlbe_to_tlbn(env, tlb);
    int way = booke206_tlbe_to_way(env, tlb);

    env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;

    env->spr[SPR_BOOKE_MAS1] = MAS1_VALID;
    env->spr[SPR_BOOKE_MAS2] = 0;

    env->spr[SPR_BOOKE_MAS7] = (uint64_t)tlb->RPN >> 32;
    env->spr[SPR_BOOKE_MAS3] = tlb->RPN;
    env->spr[SPR_BOOKE_MAS1] |= tlb->PID << MAS1_TID_SHIFT;
    env->spr[SPR_BOOKE_MAS1] |= booke206_page_size_to_tlb(tlb->size)
                                << MAS1_TSIZE_SHIFT;
    env->spr[SPR_BOOKE_MAS1] |= tlb->attr & MAS1_IPROT;
    /* attr bit 0 caches the translation-space (TS) bit. */
    if (tlb->attr & 1) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    env->spr[SPR_BOOKE_MAS2] = tlb->EPN;
    env->spr[SPR_BOOKE_MAS2] |= (tlb->attr >> 1) &
        (MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E);

    /* Supervisor permissions are stored shifted left by 4 in tlb->prot. */
    if (tlb->prot & PAGE_EXEC) {
        env->spr[SPR_BOOKE_MAS3] |= MAS3_UX;
    }
    if (tlb->prot & (PAGE_EXEC << 4)) {
        env->spr[SPR_BOOKE_MAS3] |= MAS3_SX;
    }
    if (tlb->prot & PAGE_WRITE) {
        env->spr[SPR_BOOKE_MAS3] |= MAS3_UW;
    }
    if (tlb->prot & (PAGE_WRITE << 4)) {
        env->spr[SPR_BOOKE_MAS3] |= MAS3_SW;
    }
    if (tlb->prot & PAGE_READ) {
        env->spr[SPR_BOOKE_MAS3] |= MAS3_UR;
    }
    if (tlb->prot & (PAGE_READ << 4)) {
        env->spr[SPR_BOOKE_MAS3] |= MAS3_SR;
    }

    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
4389

    
4390
void helper_booke206_tlbre(void)
4391
{
4392
    ppcemb_tlb_t *tlb = NULL;
4393

    
4394
    tlb = booke206_cur_tlb(env);
4395
    booke206_tlb_to_mas(env, tlb);
4396
}
4397

    
4398
/* tlbsx: search every TLB array for an entry translating @address under
 * the PID and address space selected by MAS6.  On a hit, the matching
 * entry is reflected into the MAS registers; on a miss, the MAS registers
 * are loaded with the MAS4 defaults plus next-victim information.
 */
void helper_booke206_tlbsx(target_ulong address)
{
    ppcemb_tlb_t *tlb = NULL;
    int i, j;
    target_phys_addr_t raddr;   /* filled by ppcemb_tlb_check(); unused here */
    uint32_t spid, sas;

    /* search PID and address space requested via MAS6 */
    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbe(env, i, address, j);

            /* a non-zero return is treated as "no match for this way" */
            if (ppcemb_tlb_check(env, tlb, &raddr, address, spid, 0, j)) {
                continue;
            }

            /* the entry's translation space must also match MAS6[SAS] */
            if (sas != (tlb->attr & MAS6_SAS)) {
                continue;
            }

            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    /* propagate the searched address space into MAS1[TS] */
    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    /* propagate the searched PID (MAS6 upper half) into MAS1[TID] */
    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
4447

    
4448
static inline void booke206_invalidate_ea_tlb(CPUState *env, int tlbn,
4449
                                              uint32_t ea)
4450
{
4451
    int i;
4452
    int ways = booke206_tlb_ways(env, tlbn);
4453

    
4454
    for (i = 0; i < ways; i++) {
4455
        ppcemb_tlb_t *tlb = booke206_get_tlbe(env, tlbn, ea, i);
4456
        target_phys_addr_t masked_ea = ea & ~(tlb->size - 1);
4457
        if ((tlb->EPN == (masked_ea >> MAS2_EPN_SHIFT)) &&
4458
            !(tlb->attr & MAS1_IPROT)) {
4459
            tlb->prot = 0;
4460
        }
4461
    }
4462
}
4463

    
4464
/* tlbivax: invalidate TLB entries.  Bit 2 of @address requests a full
 * flush of one array, bit 3 selects TLB1 over TLB0; otherwise only the
 * entries matching the effective address are invalidated. */
void helper_booke206_tlbivax(target_ulong address)
{
    int tlb1 = (address & 0x8) != 0;

    if (address & 0x4) {
        /* flush every entry of the selected TLB array */
        booke206_flush_tlb(env,
                           tlb1 ? BOOKE206_FLUSH_TLB1 : BOOKE206_FLUSH_TLB0,
                           tlb1 ? 1 : 0);
        return;
    }

    if (tlb1) {
        /* flush matching TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        tlb_flush(env, 1);
    } else {
        /* flush matching TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        tlb_flush_page(env, address & MAS2_EPN_MASK);
    }
}
4488

    
4489
/* tlbflush: decode the flush selector (bit 1 -> TLB1, bit 2 -> TLB0)
 * and flush the selected arrays with the global flag set. */
void helper_booke206_tlbflush(uint32_t type)
{
    int flags = ((type & 2) ? BOOKE206_FLUSH_TLB1 : 0) |
                ((type & 4) ? BOOKE206_FLUSH_TLB0 : 0);

    booke206_flush_tlb(env, flags, 1);
}
4503

    
4504
#endif /* !CONFIG_USER_ONLY */