Statistics
| Branch: | Revision:

root / target-ppc / op_helper.c @ 6e87b7c7

History | View | Annotate | Download (113.3 kB)

1
/*
2
 *  PowerPC emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2003-2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19
 */
20
#include <string.h>
21
#include "exec.h"
22
#include "host-utils.h"
23
#include "helper.h"
24

    
25
#include "helper_regs.h"
26

    
27
//#define DEBUG_OP
28
//#define DEBUG_EXCEPTIONS
29
//#define DEBUG_SOFTWARE_TLB
30

    
31
#ifdef DEBUG_SOFTWARE_TLB
32
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
33
#else
34
#  define LOG_SWTLB(...) do { } while (0)
35
#endif
36

    
37

    
38
/*****************************************************************************/
39
/* Exceptions processing helpers */
40

    
41
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
42
{
43
#if 0
44
    printf("Raise exception %3x code : %d\n", exception, error_code);
45
#endif
46
    env->exception_index = exception;
47
    env->error_code = error_code;
48
    cpu_loop_exit();
49
}
50

    
51
void helper_raise_exception (uint32_t exception)
52
{
53
    helper_raise_exception_err(exception, 0);
54
}
55

    
56
/*****************************************************************************/
57
/* Registers load and stores */
58
target_ulong helper_load_cr (void)
59
{
60
    return (env->crf[0] << 28) |
61
           (env->crf[1] << 24) |
62
           (env->crf[2] << 20) |
63
           (env->crf[3] << 16) |
64
           (env->crf[4] << 12) |
65
           (env->crf[5] << 8) |
66
           (env->crf[6] << 4) |
67
           (env->crf[7] << 0);
68
}
69

    
70
/* Scatter val into the CR fields selected by the 8-bit mask;
 * mask bit 7 corresponds to crf[0] (CR field 0). */
void helper_store_cr (target_ulong val, uint32_t mask)
{
    int field;

    for (field = 0; field < 8; field++) {
        if (mask & (0x80 >> field)) {
            env->crf[field] = (val >> (4 * (7 - field))) & 0xFUL;
        }
    }
}
79

    
80
/*****************************************************************************/
81
/* SPR accesses */
82
/* Debug trace: log a SPR read (the value is already in env->spr[]). */
void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " ADDRX "\n",
                sprn, sprn, env->spr[sprn]);
}

/* Debug trace: log a SPR write (the value is already in env->spr[]). */
void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " ADDRX "\n",
                sprn, sprn, env->spr[sprn]);
}
93

    
94
/* Time-base lower word */
target_ulong helper_load_tbl (void)
{
    return cpu_ppc_load_tbl(env);
}

/* Time-base upper word */
target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

/* Alternate time-base lower word */
target_ulong helper_load_atbl (void)
{
    return cpu_ppc_load_atbl(env);
}

/* Alternate time-base upper word */
target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

/* PowerPC 601 real-time clock, lower word */
target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

/* PowerPC 601 real-time clock, upper word */
target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
123

    
124
#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
/* Address space register (64-bit only) */
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

/* Storage description register (page table base) */
void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

/* Time-base lower word write */
void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

/* Time-base upper word write */
void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

/* Alternate time-base lower word write */
void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

/* Alternate time-base upper word write */
void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

/* 601 real-time clock lower word write */
void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

/* 601 real-time clock upper word write */
void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

/* Decrementer read */
target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

/* Decrementer write */
void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}
176

    
177
/* PowerPC 601 HID0 write.  Bit 3 of HID0 selects little-endian mode,
 * so a change of that bit must be mirrored into the MSR_LE position of
 * the translator hflags (via hflags_nmsr, the non-MSR-derived flags). */
void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " ADDRX "\n",
                    __func__, val & 0x8 ? 'l' : 'b', env->hflags);
    }
    /* HID0 is a 32-bit SPR even on 64-bit hosts */
    env->spr[SPR_HID0] = (uint32_t)val;
}
193

    
194
/* PowerPC 403 protection-bound register write.  Bounds affect address
 * protection checks, so any change invalidates cached translations. */
void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}
202

    
203
/* 40x programmable interval timer read */
target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

/* 40x programmable interval timer write */
void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

/* 40x debug control register 0 write */
void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

/* 40x storage little-endian register write */
void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

/* BookE timer control register write */
void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

/* BookE timer status register write */
void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}
232

    
233
/* Instruction BAT upper-half write */
void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

/* Instruction BAT lower-half write */
void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

/* Data BAT upper-half write */
void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

/* Data BAT lower-half write */
void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

/* 601 unified BAT lower-half write (601 has no separate D-BATs) */
void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

/* 601 unified BAT upper-half write */
void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif
263

    
264
/*****************************************************************************/
265
/* Memory load and stores */
266

    
267
/* Effective-address arithmetic: addr + arg, truncated to 32 bits when
 * the CPU is not running in 64-bit (MSR[SF]) mode. */
static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf) {
        return (uint32_t)(addr + arg);
    }
#endif
    return addr + arg;
}
276

    
277
/* lmw: load GPRs reg..31 from consecutive words starting at addr,
 * byte-swapping each word when MSR[LE] is set. */
void helper_lmw (target_ulong addr, uint32_t reg)
{
    uint32_t r;

    for (r = reg; r < 32; r++) {
        uint32_t word = ldl(addr);
        env->gpr[r] = msr_le ? bswap32(word) : word;
        addr = addr_add(addr, 4);
    }
}
287

    
288
/* stmw: store GPRs reg..31 to consecutive words starting at addr,
 * byte-swapping each word when MSR[LE] is set. */
void helper_stmw (target_ulong addr, uint32_t reg)
{
    uint32_t r;

    for (r = reg; r < 32; r++) {
        uint32_t word = (uint32_t)env->gpr[r];
        stl(addr, msr_le ? bswap32(word) : word);
        addr = addr_add(addr, 4);
    }
}
298

    
299
/* lswi/lswx backend: load nb bytes from addr into GPRs starting at
 * reg, wrapping from r31 to r0; a trailing partial word is loaded
 * left-aligned with the remaining bytes zeroed. */
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int shift;

    while (nb > 3) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (shift = 24; nb > 0; nb--, shift -= 8) {
            env->gpr[reg] |= ldub(addr) << shift;
            addr = addr_add(addr, 1);
        }
    }
}
315
/* PPC32 specification says we must generate an exception if
316
 * rA is in the range of registers to be loaded.
317
 * In an other hand, IBM says this is valid, but rA won't be loaded.
318
 * For now, I'll follow the spec...
319
 */
320
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
321
{
322
    if (likely(xer_bc != 0)) {
323
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
324
                     (reg < rb && (reg + xer_bc) > rb))) {
325
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
326
                                       POWERPC_EXCP_INVAL |
327
                                       POWERPC_EXCP_INVAL_LSWX);
328
        } else {
329
            helper_lsw(addr, xer_bc, reg);
330
        }
331
    }
332
}
333

    
334
/* stswi/stswx backend: store nb bytes from GPRs starting at reg to
 * addr, wrapping from r31 to r0; a trailing partial word stores the
 * most significant bytes first. */
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int shift;

    while (nb > 3) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    if (unlikely(nb > 0)) {
        for (shift = 24; nb > 0; nb--, shift -= 8) {
            stb(addr, (env->gpr[reg] >> shift) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}
349

    
350
/* Zero one data cache line and drop any lwarx/ldarx reservation that
 * covered it. */
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int offs;

    addr &= ~(dcache_line_size - 1);
    for (offs = 0; offs < dcache_line_size; offs += 4) {
        stl(addr + offs, 0);
    }
    if (env->reserve == addr) {
        env->reserve = (target_ulong)-1ULL;
    }
}
360

    
361
/* dcbz: zero the cache line containing addr, using the CPU's
 * configured data cache line size. */
void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}
365

    
366
/* dcbz on the 970: HID5 bits (here (HID5 >> 7) & 3 == 1) select a
 * 32-byte dcbz instead of the full cache line — presumably the
 * 970's "dcbz single cache line" compatibility mode. */
void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}
373

    
374
/* icbi: invalidate the instruction cache block containing addr by
 * throwing away any translated code for it. */
void helper_icbi(target_ulong addr)
{
    uint32_t dummy;

    addr &= ~(env->dcache_line_size - 1);
    /* The PowerPC specification says the invalidation is to be treated
     * like a load (not a fetch) by the MMU, so perform the access by
     * hand to get the load-side MMU effects. */
    dummy = ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}
387

    
388
// XXX: to be tested
389
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
390
{
391
    int i, c, d;
392
    d = 24;
393
    for (i = 0; i < xer_bc; i++) {
394
        c = ldub(addr);
395
        addr = addr_add(addr, 1);
396
        /* ra (if not 0) and rb are never modified */
397
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
398
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
399
        }
400
        if (unlikely(c == xer_cmp))
401
            break;
402
        if (likely(d != 0)) {
403
            d -= 8;
404
        } else {
405
            d = 24;
406
            reg++;
407
            reg = reg & 0x1F;
408
        }
409
    }
410
    return i;
411
}
412

    
413
/*****************************************************************************/
414
/* Fixed point operations helpers */
415
#if defined(TARGET_PPC64)

/* mulhd: high 64 bits of the signed 64x64 -> 128-bit product */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t lo, hi;

    muls64(&lo, &hi, arg1, arg2);
    return hi;
}

/* mulhdu: high 64 bits of the unsigned 64x64 -> 128-bit product */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t lo, hi;

    mulu64(&lo, &hi, arg1, arg2);
    return hi;
}
434

    
435
/* mulldo: signed 64x64 -> 64-bit multiply; sets XER[OV] (and SO) when
 * the full 128-bit product does not fit in 64 bits. */
uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* The product fits in 64 bits iff the high word equals the sign
     * extension of the low word.  The previous test (th == 0 ||
     * th == -1) missed overflows such as 0x4000000000000000 * 2,
     * where th == 0 but the 64-bit result is negative. */
    if (likely(th == ((int64_t)tl >> 63))) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif
450

    
451
/* cntlzw: count leading zeros of the 32-bit operand */
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
/* cntlzd: count leading zeros of the 64-bit operand */
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif
462

    
463
/* shift right arithmetic helper */
464
target_ulong helper_sraw (target_ulong value, target_ulong shift)
465
{
466
    int32_t ret;
467

    
468
    if (likely(!(shift & 0x20))) {
469
        if (likely((uint32_t)shift != 0)) {
470
            shift &= 0x1f;
471
            ret = (int32_t)value >> shift;
472
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
473
                env->xer &= ~(1 << XER_CA);
474
            } else {
475
                env->xer |= (1 << XER_CA);
476
            }
477
        } else {
478
            ret = (int32_t)value;
479
            env->xer &= ~(1 << XER_CA);
480
        }
481
    } else {
482
        ret = (int32_t)value >> 31;
483
        if (ret) {
484
            env->xer |= (1 << XER_CA);
485
        } else {
486
            env->xer &= ~(1 << XER_CA);
487
        }
488
    }
489
    return (target_long)ret;
490
}
491

    
492
#if defined(TARGET_PPC64)
/* srad: 64-bit arithmetic right shift.  XER[CA] is set iff the source
 * is negative and any 1 bits were shifted out.  A shift amount with
 * bit 0x40 set yields all sign bits. */
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* The shifted-out-bits mask must be computed in 64 bits:
             * the previous "1 << shift" was a 32-bit computation,
             * undefined for shift >= 31 and wrong (truncated mask) for
             * shifts above 31, giving an incorrect XER[CA]. */
            if (likely(ret >= 0 ||
                       (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif
521

    
522
/* popcntb: SWAR reduction — pairwise bit sums, then nibble sums, then
 * byte sums — leaving the population count of each byte in that byte. */
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}
529

    
530
#if defined(TARGET_PPC64)
531
target_ulong helper_popcntb_64 (target_ulong val)
532
{
533
    val = (val & 0x5555555555555555ULL) + ((val >>  1) & 0x5555555555555555ULL);
534
    val = (val & 0x3333333333333333ULL) + ((val >>  2) & 0x3333333333333333ULL);
535
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fULL);
536
    return val;
537
}
538
#endif
539

    
540
/*****************************************************************************/
541
/* Floating point operations helpers */
542
/* Widen a raw 32-bit float image to a 64-bit one via softfloat,
 * accumulating status flags in env->fp_status. */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU in;
    CPU_DoubleU out;

    in.l = arg;
    out.d = float32_to_float64(in.f, &env->fp_status);
    return out.ll;
}
550

    
551
/* Narrow a raw 64-bit float image to a 32-bit one via softfloat,
 * accumulating status flags in env->fp_status. */
uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_DoubleU in;
    CPU_FloatU out;

    in.ll = arg;
    out.f = float64_to_float32(in.d, &env->fp_status);
    return out.l;
}
559

    
560
/* True when the biased exponent of d is zero — i.e. d is denormal
 * (callers exclude the zero case before testing this). */
static always_inline int isden (float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}
568

    
569
/* Classify arg into the 5-bit FPRF encoding (class bit + FPCC) and,
 * when set_fprf is non-zero, store it into FPSCR[FPRF].
 * Returns the low 4 bits (FPCC) for Rc1 condition-register update. */
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int negative;
    int fprf;

    farg.ll = arg;
    negative = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        /* Signaling NaN: flags are undefined (0x00); quiet NaN: 0x11 */
        fprf = float64_is_signaling_nan(farg.d) ? 0x00 : 0x11;
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        fprf = negative ? 0x09 : 0x05;
    } else if (float64_is_zero(farg.d)) {
        /* +/- zero */
        fprf = negative ? 0x12 : 0x02;
    } else {
        /* Finite non-zero: denormal (0x10) or normal (0x00)… */
        fprf = isden(farg.d) ? 0x10 : 0x00;
        /* …combined with the sign code */
        fprf |= negative ? 0x08 : 0x04;
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= fprf << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return fprf & 0xF;
}
620

    
621
/* Floating-point invalid operations exception */
/* Record the invalid-operation condition `op` in the FPSCR, raise the
 * program interrupt when enabled (FPSCR[VE] together with MSR[FE0|FE1]),
 * and return the default quiet-NaN image to be stored in the target
 * FPR when the exception is disabled (0 for the flag-only cases). */
static always_inline uint64_t fload_invalid_op_excp (int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        /* FPCC is set to "unordered" (0b0001 with class bit) */
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred: cleared ve suppresses the raise at
             * the bottom; helper_float_check_status() delivers it later */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
        /* fallthrough to the shared arithmetic-result update */
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
702

    
703
/* Record a zero-divide condition in the FPSCR and raise the program
 * interrupt when FPSCR[ZE] and MSR[FE0|FE1] enable it. */
static always_inline void float_zero_divide_excp (void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}
718

    
719
/* Record an overflow condition in the FPSCR.  When enabled, only the
 * pending program exception is latched here — it is delivered later by
 * helper_float_check_status(), after the target FPR has been written. */
static always_inline void float_overflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        /* Disabled overflow also signals inexact */
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}
736

    
737
/* Record an underflow condition in the FPSCR; when enabled, latch the
 * pending program exception for later delivery. */
static always_inline void float_underflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}
751

    
752
/* Record an inexact-result condition in the FPSCR; when enabled, latch
 * the pending program exception for later delivery. */
static always_inline void float_inexact_excp (void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
765

    
766
static always_inline void fpscr_set_rounding_mode (void)
767
{
768
    int rnd_type;
769

    
770
    /* Set rounding mode */
771
    switch (fpscr_rn) {
772
    case 0:
773
        /* Best approximation (round to nearest) */
774
        rnd_type = float_round_nearest_even;
775
        break;
776
    case 1:
777
        /* Smaller magnitude (round toward zero) */
778
        rnd_type = float_round_to_zero;
779
        break;
780
    case 2:
781
        /* Round toward +infinite */
782
        rnd_type = float_round_up;
783
        break;
784
    default:
785
    case 3:
786
        /* Round toward -infinite */
787
        rnd_type = float_round_down;
788
        break;
789
    }
790
    set_float_rounding_mode(rnd_type, &env->fp_status);
791
}
792

    
793
/* mtfsb0 & co: clear FPSCR bit `bit`.  Only a 1->0 transition of the
 * rounding-mode bits has a side effect (resync softfloat). */
void helper_fpscr_clrbit (uint32_t bit)
{
    int was_set = (env->fpscr >> bit) & 1;

    env->fpscr &= ~(1 << bit);
    if (was_set && (bit == FPSCR_RN1 || bit == FPSCR_RN)) {
        fpscr_set_rounding_mode();
    }
}
810

    
811
/* mtfsb1 & co: set FPSCR bit `bit`.  On a 0->1 transition, propagate
 * the summary bits (FX, VX, FEX) and raise the program interrupt when
 * the matching enable bit is set. */
void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            /* The missing break here formerly fell through into the
             * FPSCR_OX case, spuriously raising an overflow exception
             * when FPSCR[OE] was set but FPSCR[VE] was not. */
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            /* Any specific invalid-operation bit also sets VX and FX */
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                /* Collect every pending invalid-operation cause into
                 * the error code */
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception, so
             * the exception is only latched here and delivered later */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
925

    
926
/* mtfsf: replace the FPSCR 4-bit fields selected by mask (one mask bit
 * per field) with the corresponding fields of arg, then recompute the
 * VX and FEX summary bits and resync the rounding mode. */
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t old_fpscr, incoming;
    int field;

    old_fpscr = env->fpscr;
    incoming = (uint32_t)arg;
    /* FEX and VX are summary bits: never written directly */
    incoming = (incoming & ~0x60000000) | (old_fpscr & 0x60000000);
    for (field = 0; field < 8; field++) {
        if (mask & (1 << field)) {
            env->fpscr &= ~(0xF << (4 * field));
            env->fpscr |= incoming & (0xF << (4 * field));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode();
}
959

    
960
void helper_float_check_status (void)
961
{
962
#ifdef CONFIG_SOFTFLOAT
963
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
964
        (env->error_code & POWERPC_EXCP_FP)) {
965
        /* Differred floating-point exception after target FPR update */
966
        if (msr_fe0 != 0 || msr_fe1 != 0)
967
            helper_raise_exception_err(env->exception_index, env->error_code);
968
    } else {
969
        int status = get_float_exception_flags(&env->fp_status);
970
        if (status & float_flag_divbyzero) {
971
            float_zero_divide_excp();
972
        } else if (status & float_flag_overflow) {
973
            float_overflow_excp();
974
        } else if (status & float_flag_underflow) {
975
            float_underflow_excp();
976
        } else if (status & float_flag_inexact) {
977
            float_inexact_excp();
978
        }
979
    }
980
#else
981
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
982
        (env->error_code & POWERPC_EXCP_FP)) {
983
        /* Differred floating-point exception after target FPR update */
984
        if (msr_fe0 != 0 || msr_fe1 != 0)
985
            helper_raise_exception_err(env->exception_index, env->error_code);
986
    }
987
#endif
988
}
989

    
990
#ifdef CONFIG_SOFTFLOAT
991
void helper_reset_fpstatus (void)
992
{
993
    set_float_exception_flags(0, &env->fp_status);
994
}
995
#endif
996

    
997
/* fadd - fadd. */
/* Double-precision add.  With precise emulation, sNaN operands and
 * inf + (-inf) raise the corresponding invalid-operation conditions
 * and substitute the default result. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                      float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
1021

    
1022
/* fsub - fsub. */
1023
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
1024
{
1025
    CPU_DoubleU farg1, farg2;
1026

    
1027
    farg1.ll = arg1;
1028
    farg2.ll = arg2;
1029
#if USE_PRECISE_EMULATION
1030
{
1031
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1032
                 float64_is_signaling_nan(farg2.d))) {
1033
        /* sNaN subtraction */
1034
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1035
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
1036
                      float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
1037
        /* Magnitude subtraction of infinities */
1038
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1039
    } else {
1040
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1041
    }
1042
}
1043
#else
1044
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1045
#endif
1046
    return farg1.ll;
1047
}
1048

    
1049
/* fmul - fmul. */
1050
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
1051
{
1052
    CPU_DoubleU farg1, farg2;
1053

    
1054
    farg1.ll = arg1;
1055
    farg2.ll = arg2;
1056
#if USE_PRECISE_EMULATION
1057
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1058
                 float64_is_signaling_nan(farg2.d))) {
1059
        /* sNaN multiplication */
1060
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1061
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1062
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1063
        /* Multiplication of zero by infinity */
1064
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1065
    } else {
1066
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1067
    }
1068
#else
1069
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1070
#endif
1071
    return farg1.ll;
1072
}
1073

    
1074
/* fdiv - fdiv. */
1075
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
1076
{
1077
    CPU_DoubleU farg1, farg2;
1078

    
1079
    farg1.ll = arg1;
1080
    farg2.ll = arg2;
1081
#if USE_PRECISE_EMULATION
1082
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1083
                 float64_is_signaling_nan(farg2.d))) {
1084
        /* sNaN division */
1085
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1086
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
1087
        /* Division of infinity by infinity */
1088
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
1089
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
1090
        /* Division of zero by zero */
1091
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
1092
    } else {
1093
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1094
    }
1095
#else
1096
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1097
#endif
1098
    return farg1.ll;
1099
}
1100

    
1101
/* fabs */
1102
uint64_t helper_fabs (uint64_t arg)
1103
{
1104
    CPU_DoubleU farg;
1105

    
1106
    farg.ll = arg;
1107
    farg.d = float64_abs(farg.d);
1108
    return farg.ll;
1109
}
1110

    
1111
/* fnabs */
1112
uint64_t helper_fnabs (uint64_t arg)
1113
{
1114
    CPU_DoubleU farg;
1115

    
1116
    farg.ll = arg;
1117
    farg.d = float64_abs(farg.d);
1118
    farg.d = float64_chs(farg.d);
1119
    return farg.ll;
1120
}
1121

    
1122
/* fneg */
1123
uint64_t helper_fneg (uint64_t arg)
1124
{
1125
    CPU_DoubleU farg;
1126

    
1127
    farg.ll = arg;
1128
    farg.d = float64_chs(farg.d);
1129
    return farg.ll;
1130
}
1131

    
1132
/* fctiw - fctiw. */
1133
uint64_t helper_fctiw (uint64_t arg)
1134
{
1135
    CPU_DoubleU farg;
1136
    farg.ll = arg;
1137

    
1138
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1139
        /* sNaN conversion */
1140
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1141
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
1142
        /* qNan / infinity conversion */
1143
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1144
    } else {
1145
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
1146
#if USE_PRECISE_EMULATION
1147
        /* XXX: higher bits are not supposed to be significant.
1148
         *     to make tests easier, return the same as a real PowerPC 750
1149
         */
1150
        farg.ll |= 0xFFF80000ULL << 32;
1151
#endif
1152
    }
1153
    return farg.ll;
1154
}
1155

    
1156
/* fctiwz - fctiwz. */
1157
uint64_t helper_fctiwz (uint64_t arg)
1158
{
1159
    CPU_DoubleU farg;
1160
    farg.ll = arg;
1161

    
1162
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1163
        /* sNaN conversion */
1164
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1165
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
1166
        /* qNan / infinity conversion */
1167
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1168
    } else {
1169
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1170
#if USE_PRECISE_EMULATION
1171
        /* XXX: higher bits are not supposed to be significant.
1172
         *     to make tests easier, return the same as a real PowerPC 750
1173
         */
1174
        farg.ll |= 0xFFF80000ULL << 32;
1175
#endif
1176
    }
1177
    return farg.ll;
1178
}
1179

    
1180
#if defined(TARGET_PPC64)
1181
/* fcfid - fcfid. */
1182
uint64_t helper_fcfid (uint64_t arg)
1183
{
1184
    CPU_DoubleU farg;
1185
    farg.d = int64_to_float64(arg, &env->fp_status);
1186
    return farg.ll;
1187
}
1188

    
1189
/* fctid - fctid. */
1190
uint64_t helper_fctid (uint64_t arg)
1191
{
1192
    CPU_DoubleU farg;
1193
    farg.ll = arg;
1194

    
1195
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1196
        /* sNaN conversion */
1197
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1198
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
1199
        /* qNan / infinity conversion */
1200
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1201
    } else {
1202
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
1203
    }
1204
    return farg.ll;
1205
}
1206

    
1207
/* fctidz - fctidz. */
1208
uint64_t helper_fctidz (uint64_t arg)
1209
{
1210
    CPU_DoubleU farg;
1211
    farg.ll = arg;
1212

    
1213
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1214
        /* sNaN conversion */
1215
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1216
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
1217
        /* qNan / infinity conversion */
1218
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1219
    } else {
1220
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
1221
    }
1222
    return farg.ll;
1223
}
1224

    
1225
#endif
1226

    
1227
/* Common implementation of the frin/friz/frip/frim round-to-integer
 * instructions: temporarily switch the softfloat rounding mode, round,
 * then restore the mode mandated by FPSCR. NaN/inf inputs raise VXCVI. */
static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}
1246

    
1247
/* frin : round to nearest (ties to even). */
uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

/* friz : round towards zero. */
uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

/* frip : round towards +infinity. */
uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

/* frim : round towards -infinity. */
uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
1266

    
1267
/* fmadd - fmadd. */
1268
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1269
{
1270
    CPU_DoubleU farg1, farg2, farg3;
1271

    
1272
    farg1.ll = arg1;
1273
    farg2.ll = arg2;
1274
    farg3.ll = arg3;
1275
#if USE_PRECISE_EMULATION
1276
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1277
                 float64_is_signaling_nan(farg2.d) ||
1278
                 float64_is_signaling_nan(farg3.d))) {
1279
        /* sNaN operation */
1280
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1281
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1282
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1283
        /* Multiplication of zero by infinity */
1284
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1285
    } else {
1286
#ifdef FLOAT128
1287
        /* This is the way the PowerPC specification defines it */
1288
        float128 ft0_128, ft1_128;
1289

    
1290
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1291
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1292
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1293
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1294
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1295
            /* Magnitude subtraction of infinities */
1296
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1297
        } else {
1298
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1299
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1300
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1301
        }
1302
#else
1303
        /* This is OK on x86 hosts */
1304
        farg1.d = (farg1.d * farg2.d) + farg3.d;
1305
#endif
1306
    }
1307
#else
1308
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1309
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1310
#endif
1311
    return farg1.ll;
1312
}
1313

    
1314
/* fmsub - fmsub. */
1315
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1316
{
1317
    CPU_DoubleU farg1, farg2, farg3;
1318

    
1319
    farg1.ll = arg1;
1320
    farg2.ll = arg2;
1321
    farg3.ll = arg3;
1322
#if USE_PRECISE_EMULATION
1323
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1324
                 float64_is_signaling_nan(farg2.d) ||
1325
                 float64_is_signaling_nan(farg3.d))) {
1326
        /* sNaN operation */
1327
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1328
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1329
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1330
        /* Multiplication of zero by infinity */
1331
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1332
    } else {
1333
#ifdef FLOAT128
1334
        /* This is the way the PowerPC specification defines it */
1335
        float128 ft0_128, ft1_128;
1336

    
1337
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1338
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1339
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1340
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1341
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1342
            /* Magnitude subtraction of infinities */
1343
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1344
        } else {
1345
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1346
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1347
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1348
        }
1349
#else
1350
        /* This is OK on x86 hosts */
1351
        farg1.d = (farg1.d * farg2.d) - farg3.d;
1352
#endif
1353
    }
1354
#else
1355
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1356
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1357
#endif
1358
    return farg1.ll;
1359
}
1360

    
1361
/* fnmadd - fnmadd. */
1362
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1363
{
1364
    CPU_DoubleU farg1, farg2, farg3;
1365

    
1366
    farg1.ll = arg1;
1367
    farg2.ll = arg2;
1368
    farg3.ll = arg3;
1369

    
1370
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1371
                 float64_is_signaling_nan(farg2.d) ||
1372
                 float64_is_signaling_nan(farg3.d))) {
1373
        /* sNaN operation */
1374
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1375
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1376
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1377
        /* Multiplication of zero by infinity */
1378
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1379
    } else {
1380
#if USE_PRECISE_EMULATION
1381
#ifdef FLOAT128
1382
        /* This is the way the PowerPC specification defines it */
1383
        float128 ft0_128, ft1_128;
1384

    
1385
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1386
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1387
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1388
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1389
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1390
            /* Magnitude subtraction of infinities */
1391
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1392
        } else {
1393
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1394
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1395
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1396
        }
1397
#else
1398
        /* This is OK on x86 hosts */
1399
        farg1.d = (farg1.d * farg2.d) + farg3.d;
1400
#endif
1401
#else
1402
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1403
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1404
#endif
1405
        if (likely(!float64_is_nan(farg1.d)))
1406
            farg1.d = float64_chs(farg1.d);
1407
    }
1408
    return farg1.ll;
1409
}
1410

    
1411
/* fnmsub - fnmsub. */
1412
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1413
{
1414
    CPU_DoubleU farg1, farg2, farg3;
1415

    
1416
    farg1.ll = arg1;
1417
    farg2.ll = arg2;
1418
    farg3.ll = arg3;
1419

    
1420
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1421
                 float64_is_signaling_nan(farg2.d) ||
1422
                 float64_is_signaling_nan(farg3.d))) {
1423
        /* sNaN operation */
1424
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1425
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1426
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1427
        /* Multiplication of zero by infinity */
1428
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1429
    } else {
1430
#if USE_PRECISE_EMULATION
1431
#ifdef FLOAT128
1432
        /* This is the way the PowerPC specification defines it */
1433
        float128 ft0_128, ft1_128;
1434

    
1435
        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1436
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1437
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1438
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1439
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1440
            /* Magnitude subtraction of infinities */
1441
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1442
        } else {
1443
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1444
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1445
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1446
        }
1447
#else
1448
        /* This is OK on x86 hosts */
1449
        farg1.d = (farg1.d * farg2.d) - farg3.d;
1450
#endif
1451
#else
1452
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1453
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1454
#endif
1455
        if (likely(!float64_is_nan(farg1.d)))
1456
            farg1.d = float64_chs(farg1.d);
1457
    }
1458
    return farg1.ll;
1459
}
1460

    
1461
/* frsp - frsp. */
1462
uint64_t helper_frsp (uint64_t arg)
1463
{
1464
    CPU_DoubleU farg;
1465
    float32 f32;
1466
    farg.ll = arg;
1467

    
1468
#if USE_PRECISE_EMULATION
1469
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1470
        /* sNaN square root */
1471
       farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1472
    } else {
1473
       f32 = float64_to_float32(farg.d, &env->fp_status);
1474
       farg.d = float32_to_float64(f32, &env->fp_status);
1475
    }
1476
#else
1477
    f32 = float64_to_float32(farg.d, &env->fp_status);
1478
    farg.d = float32_to_float64(f32, &env->fp_status);
1479
#endif
1480
    return farg.ll;
1481
}
1482

    
1483
/* fsqrt - fsqrt. */
1484
uint64_t helper_fsqrt (uint64_t arg)
1485
{
1486
    CPU_DoubleU farg;
1487
    farg.ll = arg;
1488

    
1489
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1490
        /* sNaN square root */
1491
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1492
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1493
        /* Square root of a negative nonzero number */
1494
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1495
    } else {
1496
        farg.d = float64_sqrt(farg.d, &env->fp_status);
1497
    }
1498
    return farg.ll;
1499
}
1500

    
1501
/* fre - fre. */
1502
uint64_t helper_fre (uint64_t arg)
1503
{
1504
    CPU_DoubleU fone, farg;
1505
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
1506
    farg.ll = arg;
1507

    
1508
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1509
        /* sNaN reciprocal */
1510
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1511
    } else {
1512
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
1513
    }
1514
    return farg.d;
1515
}
1516

    
1517
/* fres - fres. */
1518
uint64_t helper_fres (uint64_t arg)
1519
{
1520
    CPU_DoubleU fone, farg;
1521
    float32 f32;
1522
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
1523
    farg.ll = arg;
1524

    
1525
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1526
        /* sNaN reciprocal */
1527
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1528
    } else {
1529
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
1530
        f32 = float64_to_float32(farg.d, &env->fp_status);
1531
        farg.d = float32_to_float64(f32, &env->fp_status);
1532
    }
1533
    return farg.ll;
1534
}
1535

    
1536
/* frsqrte  - frsqrte. */
1537
uint64_t helper_frsqrte (uint64_t arg)
1538
{
1539
    CPU_DoubleU fone, farg;
1540
    float32 f32;
1541
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
1542
    farg.ll = arg;
1543

    
1544
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1545
        /* sNaN reciprocal square root */
1546
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1547
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1548
        /* Reciprocal square root of a negative nonzero number */
1549
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1550
    } else {
1551
        farg.d = float64_sqrt(farg.d, &env->fp_status);
1552
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
1553
        f32 = float64_to_float32(farg.d, &env->fp_status);
1554
        farg.d = float32_to_float64(f32, &env->fp_status);
1555
    }
1556
    return farg.ll;
1557
}
1558

    
1559
/* fsel - fsel. */
1560
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1561
{
1562
    CPU_DoubleU farg1;
1563

    
1564
    farg1.ll = arg1;
1565

    
1566
    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
1567
        return arg2;
1568
    else
1569
        return arg3;
1570
}
1571

    
1572
/* fcmpu : unordered floating compare. Sets one bit in CR[crfD] and in
 * FPSCR[FPRF]: 8 = less-than, 4 = greater-than, 2 = equal,
 * 1 = unordered. Signaling NaNs additionally raise VXSNAN. */
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;                                     /* unordered */
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;                                     /* less than */
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;                                     /* greater than */
    } else {
        ret = 0x02UL;                                     /* equal */
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}
1600

    
1601
/* fcmpo : ordered floating compare. Same CR/FPRF encoding as fcmpu, but
 * an unordered result is an invalid operation: sNaN raises
 * VXSNAN | VXVC, qNaN raises VXVC. */
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;                                     /* unordered */
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;                                     /* less than */
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;                                     /* greater than */
    } else {
        ret = 0x02UL;                                     /* equal */
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
1634

    
1635
#if !defined (CONFIG_USER_ONLY)
1636
/* Write MSR. hreg_store_msr() returns a nonzero exception number when
 * the MSR change requires one (e.g. power management state change); in
 * that case force a TB exit and raise it. */
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}
1644

    
1645
/* Common return-from-interrupt tail: restore NIP and a masked MSR.
 * On 64-bit targets, a 32-bit-mode return truncates NIP/MSR to 32 bits
 * and may keep the high MSR half (keep_msrh). Always exits the TB. */
static always_inline void do_rfi (target_ulong nip, target_ulong msr,
                                    target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
1673

    
1674
void helper_rfi (void)
1675
{
1676
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1677
           ~((target_ulong)0xFFFF0000), 1);
1678
}
1679

    
1680
#if defined(TARGET_PPC64)
1681
void helper_rfid (void)
1682
{
1683
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1684
           ~((target_ulong)0xFFFF0000), 0);
1685
}
1686

    
1687
void helper_hrfid (void)
1688
{
1689
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1690
           ~((target_ulong)0xFFFF0000), 0);
1691
}
1692
#endif
1693
#endif
1694

    
1695
/* tw : 32-bit trap-word. Each flags bit selects one comparison
 * (0x10 signed <, 0x08 signed >, 0x04 ==, 0x02 unsigned <,
 * 0x01 unsigned >); trap if any selected comparison holds. */
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (unlikely(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01)))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1705

    
1706
#if defined(TARGET_PPC64)
1707
/* td : 64-bit trap-doubleword; same flag encoding as helper_tw but
 * with 64-bit comparisons. */
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (unlikely(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1716
#endif
1717

    
1718
/*****************************************************************************/
1719
/* PowerPC 601 specific instructions (POWER bridge) */
1720

    
1721
/* clcs (POWER bridge) : return a cache characteristic selected by arg:
 * 0x0C icache line size, 0x0D dcache line size, 0x0E min of the two,
 * 0x0F max of the two; anything else is undefined and returns 0. */
target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
    default:
        /* Undefined */
        return 0;
    }
}
1748

    
1749
/* div (POWER bridge) : divide the 64-bit value (arg1 << 32 | MQ) by
 * arg2; quotient is returned, remainder goes to MQ. Overflow and
 * divide-by-zero yield INT32_MIN with MQ cleared. */
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return  tmp / (int32_t)arg2;
    }
}

/* divo (POWER bridge) : as helper_div, but also updates XER[OV]/XER[SO]
 * when the quotient does not fit in 32 bits. */
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}
1783

    
1784
/* divs (POWER bridge) : 32-bit signed divide; remainder goes to MQ.
 * Overflow / divide-by-zero yield INT32_MIN with MQ cleared. */
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

/* divso (POWER bridge) : as helper_divs, additionally recording
 * overflow in XER[OV]/XER[SO]. */
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
1809

    
1810
#if !defined (CONFIG_USER_ONLY)
1811
/* rac (POWER bridge) : translate an effective address to a real
 * address, temporarily disabling BAT translation; returns 0 on a
 * translation miss. */
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}
1828

    
1829
void helper_rfsvc (void)
1830
{
1831
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1832
}
1833
#endif
1834

    
1835
/*****************************************************************************/
1836
/* 602 specific instructions */
1837
/* mfrom is the most crazy instruction ever seen, imho ! */
1838
/* Real implementation uses a ROM table. Do the same */
1839
/* Extremly decomposed:
1840
 *                      -arg / 256
1841
 * return 256 * log10(10           + 1.0) + 0.5
1842
 */
1843
#if !defined (CONFIG_USER_ONLY)
1844
/* 602 mfrom : look up 256 * log10(10^(-arg/256) + 1.0) + 0.5 in a
 * pre-generated ROM table, as the real hardware does; out-of-range
 * arguments return 0. */
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
1853
#endif
1854

    
1855
/*****************************************************************************/
1856
/* Embedded PowerPC specific helpers */
1857

    
1858
/* XXX: to be improved to check access rights when in user-mode */
1859
/* Read a Device Control Register. Raises a program exception when no
 * DCR environment exists or the DCR read fails; otherwise returns the
 * register value. */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    target_ulong val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}
1874

    
1875
/* Write "val" to device control register "dcrn"; raises a program
 * exception when no DCR bus exists or the write is rejected. */
void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        /* This CPU model has no DCR bus: treat as an invalid instruction. */
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
        /* Unknown or unwritable DCR number. */
        qemu_log("DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
1887

    
1888
#if !defined(CONFIG_USER_ONLY)
1889
void helper_40x_rfci (void)
1890
{
1891
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1892
           ~((target_ulong)0xFFFF0000), 0);
1893
}
1894

    
1895
void helper_rfci (void)
1896
{
1897
    do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1898
           ~((target_ulong)0x3FFF0000), 0);
1899
}
1900

    
1901
void helper_rfdi (void)
1902
{
1903
    do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1904
           ~((target_ulong)0x3FFF0000), 0);
1905
}
1906

    
1907
void helper_rfmci (void)
1908
{
1909
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1910
           ~((target_ulong)0x3FFF0000), 0);
1911
}
1912
#endif
1913

    
1914
/* 440 specific */
1915
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
1916
{
1917
    target_ulong mask;
1918
    int i;
1919

    
1920
    i = 1;
1921
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1922
        if ((high & mask) == 0) {
1923
            if (update_Rc) {
1924
                env->crf[0] = 0x4;
1925
            }
1926
            goto done;
1927
        }
1928
        i++;
1929
    }
1930
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1931
        if ((low & mask) == 0) {
1932
            if (update_Rc) {
1933
                env->crf[0] = 0x8;
1934
            }
1935
            goto done;
1936
        }
1937
        i++;
1938
    }
1939
    if (update_Rc) {
1940
        env->crf[0] = 0x2;
1941
    }
1942
 done:
1943
    env->xer = (env->xer & ~0x7F) | i;
1944
    if (update_Rc) {
1945
        env->crf[0] |= xer_so;
1946
    }
1947
    return i;
1948
}
1949

    
1950
/*****************************************************************************/
/* Altivec extension helpers */
/* Index of the most/least significant 64-bit half of a vector register
 * in host memory, depending on host byte order. */
#if defined(WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/* Iterate over the elements of vector "r" in PowerPC (big-endian)
 * element order, regardless of host byte order. */
#if defined(WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif
1967

    
1968
/* Saturating arithmetic helpers.  */
1969
#define SATCVT(from, to, from_type, to_type, min, max, use_min, use_max) \
1970
    static always_inline to_type cvt##from##to (from_type x, int *sat)  \
1971
    {                                                                   \
1972
        to_type r;                                                      \
1973
        if (use_min && x < min) {                                       \
1974
            r = min;                                                    \
1975
            *sat = 1;                                                   \
1976
        } else if (use_max && x > max) {                                \
1977
            r = max;                                                    \
1978
            *sat = 1;                                                   \
1979
        } else {                                                        \
1980
            r = x;                                                      \
1981
        }                                                               \
1982
        return r;                                                       \
1983
    }
1984
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX, 1, 1)
1985
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX, 1, 1)
1986
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX, 1, 1)
1987
SATCVT(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX, 0, 1)
1988
SATCVT(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX, 0, 1)
1989
SATCVT(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX, 0, 1)
1990
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX, 1, 1)
1991
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX, 1, 1)
1992
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX, 1, 1)
1993
#undef SATCVT
1994

    
1995
#define LVE(name, access, swap, element)                        \
1996
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
1997
    {                                                           \
1998
        size_t n_elems = ARRAY_SIZE(r->element);                \
1999
        int adjust = HI_IDX*(n_elems-1);                        \
2000
        int sh = sizeof(r->element[0]) >> 1;                    \
2001
        int index = (addr & 0xf) >> sh;                         \
2002
        if(msr_le) {                                            \
2003
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
2004
        } else {                                                        \
2005
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
2006
        }                                                               \
2007
    }
2008
#define I(x) (x)
2009
LVE(lvebx, ldub, I, u8)
2010
LVE(lvehx, lduw, bswap16, u16)
2011
LVE(lvewx, ldl, bswap32, u32)
2012
#undef I
2013
#undef LVE
2014

    
2015
/* lvsl: build the left-shift permute control vector 'sh..sh+15'. */
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i, val = (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = val++;
    }
}
2023

    
2024
/* lvsr: build the right-shift permute control vector '16-sh..31-sh'. */
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i, val = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = val++;
    }
}
2032

    
2033
#define STVE(name, access, swap, element)                       \
2034
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
2035
    {                                                           \
2036
        size_t n_elems = ARRAY_SIZE(r->element);                \
2037
        int adjust = HI_IDX*(n_elems-1);                        \
2038
        int sh = sizeof(r->element[0]) >> 1;                    \
2039
        int index = (addr & 0xf) >> sh;                         \
2040
        if(msr_le) {                                            \
2041
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2042
        } else {                                                        \
2043
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2044
        }                                                               \
2045
    }
2046
#define I(x) (x)
2047
STVE(stvebx, stb, I, u8)
2048
STVE(stvehx, stw, bswap16, u16)
2049
STVE(stvewx, stl, bswap32, u32)
2050
#undef I
2051
#undef LVE
2052

    
2053
/* mtvscr: copy the low word of the source vector (PPC element 3) into
 * VSCR and resync the flush-to-zero setting from VSCR[NJ]. */
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}
2062

    
2063
/* vaddcuw: per-word carry-out of the unsigned addition a + b
 * (1 when the sum would wrap, else 0). */
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int idx;

    for (idx = 0; idx < ARRAY_SIZE(r->u32); idx++) {
        /* a + b overflows iff b > ~a. */
        r->u32[idx] = ~a->u32[idx] < b->u32[idx];
    }
}
2070

    
2071
#define VARITH_DO(name, op, element)        \
2072
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
2073
{                                                                       \
2074
    int i;                                                              \
2075
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
2076
        r->element[i] = a->element[i] op b->element[i];                 \
2077
    }                                                                   \
2078
}
2079
#define VARITH(suffix, element)                  \
2080
  VARITH_DO(add##suffix, +, element)             \
2081
  VARITH_DO(sub##suffix, -, element)
2082
VARITH(ubm, u8)
2083
VARITH(uhm, u16)
2084
VARITH(uwm, u32)
2085
#undef VARITH_DO
2086
#undef VARITH
2087

    
2088
#define VARITHSAT_CASE(type, op, cvt, element)                          \
2089
    {                                                                   \
2090
        type result = (type)a->element[i] op (type)b->element[i];       \
2091
        r->element[i] = cvt(result, &sat);                              \
2092
    }
2093

    
2094
#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
2095
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2096
    {                                                                   \
2097
        int sat = 0;                                                    \
2098
        int i;                                                          \
2099
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2100
            switch (sizeof(r->element[0])) {                            \
2101
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
2102
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
2103
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
2104
            }                                                           \
2105
        }                                                               \
2106
        if (sat) {                                                      \
2107
            env->vscr |= (1 << VSCR_SAT);                               \
2108
        }                                                               \
2109
    }
2110
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)        \
2111
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)    \
2112
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
2113
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)       \
2114
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)     \
2115
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
2116
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
2117
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
2118
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
2119
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
2120
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
2121
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
2122
#undef VARITHSAT_CASE
2123
#undef VARITHSAT_DO
2124
#undef VARITHSAT_SIGNED
2125
#undef VARITHSAT_UNSIGNED
2126

    
2127
#define VAVG_DO(name, element, etype)                                   \
2128
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2129
    {                                                                   \
2130
        int i;                                                          \
2131
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2132
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
2133
            r->element[i] = x >> 1;                                     \
2134
        }                                                               \
2135
    }
2136

    
2137
#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2138
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
2139
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2140
VAVG(b, s8, int16_t, u8, uint16_t)
2141
VAVG(h, s16, int32_t, u16, uint32_t)
2142
VAVG(w, s32, int64_t, u32, uint64_t)
2143
#undef VAVG_DO
2144
#undef VAVG
2145

    
2146
#define VCMP_DO(suffix, compare, element, record)                       \
2147
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2148
    {                                                                   \
2149
        uint32_t ones = (uint32_t)-1;                                   \
2150
        uint32_t all = ones;                                            \
2151
        uint32_t none = 0;                                              \
2152
        int i;                                                          \
2153
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2154
            uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
2155
            switch (sizeof (a->element[0])) {                           \
2156
            case 4: r->u32[i] = result; break;                          \
2157
            case 2: r->u16[i] = result; break;                          \
2158
            case 1: r->u8[i] = result; break;                           \
2159
            }                                                           \
2160
            all &= result;                                              \
2161
            none |= result;                                             \
2162
        }                                                               \
2163
        if (record) {                                                   \
2164
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
2165
        }                                                               \
2166
    }
2167
#define VCMP(suffix, compare, element)          \
2168
    VCMP_DO(suffix, compare, element, 0)        \
2169
    VCMP_DO(suffix##_dot, compare, element, 1)
2170
VCMP(equb, ==, u8)
2171
VCMP(equh, ==, u16)
2172
VCMP(equw, ==, u32)
2173
VCMP(gtub, >, u8)
2174
VCMP(gtuh, >, u16)
2175
VCMP(gtuw, >, u32)
2176
VCMP(gtsb, >, s8)
2177
VCMP(gtsh, >, s16)
2178
VCMP(gtsw, >, s32)
2179
#undef VCMP_DO
2180
#undef VCMP
2181

    
2182
/* vmhaddshs: multiply-high, add and saturate halfwords; sets VSCR[SAT]
 * when any sum saturates. */
void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int saturated = 0;
    int idx;

    for (idx = 0; idx < ARRAY_SIZE(r->s16); idx++) {
        int32_t product = a->s16[idx] * b->s16[idx];
        int32_t sum = (int32_t)c->s16[idx] + (product >> 15);
        r->s16[idx] = cvtswsh (sum, &saturated);
    }

    if (saturated) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2197

    
2198
/* vmhraddshs: like vmhaddshs but with rounding (adds 0x4000 before the
 * 15-bit shift); sets VSCR[SAT] when any sum saturates. */
void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int saturated = 0;
    int idx;

    for (idx = 0; idx < ARRAY_SIZE(r->s16); idx++) {
        int32_t product = a->s16[idx] * b->s16[idx] + 0x00004000;
        int32_t sum = (int32_t)c->s16[idx] + (product >> 15);
        r->s16[idx] = cvtswsh (sum, &saturated);
    }

    if (saturated) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2213

    
2214
#define VMINMAX_DO(name, compare, element)                              \
2215
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2216
    {                                                                   \
2217
        int i;                                                          \
2218
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2219
            if (a->element[i] compare b->element[i]) {                  \
2220
                r->element[i] = b->element[i];                          \
2221
            } else {                                                    \
2222
                r->element[i] = a->element[i];                          \
2223
            }                                                           \
2224
        }                                                               \
2225
    }
2226
#define VMINMAX(suffix, element)                \
2227
  VMINMAX_DO(min##suffix, >, element)           \
2228
  VMINMAX_DO(max##suffix, <, element)
2229
VMINMAX(sb, s8)
2230
VMINMAX(sh, s16)
2231
VMINMAX(sw, s32)
2232
VMINMAX(ub, u8)
2233
VMINMAX(uh, u16)
2234
VMINMAX(uw, u32)
2235
#undef VMINMAX_DO
2236
#undef VMINMAX
2237

    
2238
/* vmladduhm: halfword multiply-low then modulo add of c. */
void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int idx;

    for (idx = 0; idx < ARRAY_SIZE(r->s16); idx++) {
        int32_t product = a->s16[idx] * b->s16[idx];
        r->s16[idx] = (int16_t) (product + c->s16[idx]);
    }
}
2246

    
2247
#define VMRG_DO(name, element, highp)                                   \
2248
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2249
    {                                                                   \
2250
        ppc_avr_t result;                                               \
2251
        int i;                                                          \
2252
        size_t n_elems = ARRAY_SIZE(r->element);                        \
2253
        for (i = 0; i < n_elems/2; i++) {                               \
2254
            if (highp) {                                                \
2255
                result.element[i*2+HI_IDX] = a->element[i];             \
2256
                result.element[i*2+LO_IDX] = b->element[i];             \
2257
            } else {                                                    \
2258
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2259
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2260
            }                                                           \
2261
        }                                                               \
2262
        *r = result;                                                    \
2263
    }
2264
#if defined(WORDS_BIGENDIAN)
2265
#define MRGHI 0
2266
#define MRGLO 1
2267
#else
2268
#define MRGHI 1
2269
#define MRGLO 0
2270
#endif
2271
#define VMRG(suffix, element)                   \
2272
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
2273
  VMRG_DO(mrgh##suffix, element, MRGLO)
2274
VMRG(b, u8)
2275
VMRG(h, u16)
2276
VMRG(w, u32)
2277
#undef VMRG_DO
2278
#undef VMRG
2279
#undef MRGHI
2280
#undef MRGLO
2281

    
2282
/* vmsummbm: signed-by-unsigned byte multiplies, summed per word and
 * added to c (modulo). */
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t products[16];
    int idx;

    for (idx = 0; idx < ARRAY_SIZE(r->s8); idx++) {
        products[idx] = (int32_t)a->s8[idx] * b->u8[idx];
    }

    VECTOR_FOR_INORDER_I(idx, s32) {
        r->s32[idx] = c->s32[idx] + products[4*idx] + products[4*idx+1]
                    + products[4*idx+2] + products[4*idx+3];
    }
}
2295

    
2296
/* vmsumshm: signed halfword multiplies, summed per word and added to c
 * (modulo). */
void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t products[8];
    int idx;

    for (idx = 0; idx < ARRAY_SIZE(r->s16); idx++) {
        products[idx] = a->s16[idx] * b->s16[idx];
    }

    VECTOR_FOR_INORDER_I(idx, s32) {
        r->s32[idx] = c->s32[idx] + products[2*idx] + products[2*idx+1];
    }
}
2309

    
2310
/* vmsumshs: signed halfword multiplies, summed per word in 64 bits,
 * added to c and saturated to 32 bits; sets VSCR[SAT] on saturation. */
void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t products[8];
    int idx;
    int saturated = 0;

    for (idx = 0; idx < ARRAY_SIZE(r->s16); idx++) {
        products[idx] = (int32_t)a->s16[idx] * b->s16[idx];
    }

    VECTOR_FOR_INORDER_I (idx, s32) {
        int64_t total = (int64_t)c->s32[idx] + products[2*idx] + products[2*idx+1];
        r->u32[idx] = cvtsdsw(total, &saturated);
    }

    if (saturated) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2329

    
2330
/* vmsumubm: unsigned byte multiplies, summed per word and added to c
 * (modulo). */
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t products[16];
    int idx;

    for (idx = 0; idx < ARRAY_SIZE(r->u8); idx++) {
        products[idx] = a->u8[idx] * b->u8[idx];
    }

    VECTOR_FOR_INORDER_I(idx, u32) {
        r->u32[idx] = c->u32[idx] + products[4*idx] + products[4*idx+1]
                    + products[4*idx+2] + products[4*idx+3];
    }
}
2343

    
2344
/* vmsumuhm: unsigned halfword multiplies, summed per word and added to
 * c (modulo). */
void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t products[8];
    int idx;

    for (idx = 0; idx < ARRAY_SIZE(r->u16); idx++) {
        products[idx] = a->u16[idx] * b->u16[idx];
    }

    VECTOR_FOR_INORDER_I(idx, u32) {
        r->u32[idx] = c->u32[idx] + products[2*idx] + products[2*idx+1];
    }
}
2357

    
2358
/* vmsumuhs: unsigned halfword multiplies, summed per word in 64 bits,
 * added to c and saturated to 32 bits; sets VSCR[SAT] on saturation. */
void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t products[8];
    int idx;
    int saturated = 0;

    for (idx = 0; idx < ARRAY_SIZE(r->u16); idx++) {
        products[idx] = a->u16[idx] * b->u16[idx];
    }

    VECTOR_FOR_INORDER_I (idx, s32) {
        uint64_t total = (uint64_t)c->u32[idx] + products[2*idx] + products[2*idx+1];
        r->u32[idx] = cvtuduw(total, &saturated);
    }

    if (saturated) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2377

    
2378
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
2379
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
2380
    {                                                                   \
2381
        int i;                                                          \
2382
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
2383
            if (evenp) {                                                \
2384
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2385
            } else {                                                    \
2386
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2387
            }                                                           \
2388
        }                                                               \
2389
    }
2390
#define VMUL(suffix, mul_element, prod_element) \
2391
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2392
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2393
VMUL(sb, s8, s16)
2394
VMUL(sh, s16, s32)
2395
VMUL(ub, u8, u16)
2396
VMUL(uh, u16, u32)
2397
#undef VMUL_DO
2398
#undef VMUL
2399

    
2400
/* vperm: select each result byte from the 32-byte concatenation of a
 * and b, according to the low 5 bits of the control byte in c. */
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t perm;
    int i;

    VECTOR_FOR_INORDER_I (i, u8) {
        int sel = c->u8[i] & 0x1f;
#if defined(WORDS_BIGENDIAN)
        int src = sel & 0xf;
#else
        int src = 15 - (sel & 0xf);
#endif
        /* Bit 4 of the selector chooses between a (clear) and b (set). */
        if (sel & 0x10) {
            perm.u8[i] = b->u8[src];
        } else {
            perm.u8[i] = a->u8[src];
        }
    }
    *r = perm;
}
2419

    
2420
#if defined(WORDS_BIGENDIAN)
2421
#define PKBIG 1
2422
#else
2423
#define PKBIG 0
2424
#endif
2425
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2426
{
2427
    int i, j;
2428
    ppc_avr_t result;
2429
#if defined(WORDS_BIGENDIAN)
2430
    const ppc_avr_t *x[2] = { a, b };
2431
#else
2432
    const ppc_avr_t *x[2] = { b, a };
2433
#endif
2434

    
2435
    VECTOR_FOR_INORDER_I (i, u64) {
2436
        VECTOR_FOR_INORDER_I (j, u32){
2437
            uint32_t e = x[i]->u32[j];
2438
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
2439
                                 ((e >> 6) & 0x3e0) |
2440
                                 ((e >> 3) & 0x1f));
2441
        }
2442
    }
2443
    *r = result;
2444
}
2445

    
2446
#define VPK(suffix, from, to, cvt, dosat)       \
2447
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2448
    {                                                                   \
2449
        int i;                                                          \
2450
        int sat = 0;                                                    \
2451
        ppc_avr_t result;                                               \
2452
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
2453
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
2454
        VECTOR_FOR_INORDER_I (i, from) {                                \
2455
            result.to[i] = cvt(a0->from[i], &sat);                      \
2456
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
2457
        }                                                               \
2458
        *r = result;                                                    \
2459
        if (dosat && sat) {                                             \
2460
            env->vscr |= (1 << VSCR_SAT);                               \
2461
        }                                                               \
2462
    }
2463
#define I(x, y) (x)
2464
VPK(shss, s16, s8, cvtshsb, 1)
2465
VPK(shus, s16, u8, cvtshub, 1)
2466
VPK(swss, s32, s16, cvtswsh, 1)
2467
VPK(swus, s32, u16, cvtswuh, 1)
2468
VPK(uhus, u16, u8, cvtuhub, 1)
2469
VPK(uwus, u32, u16, cvtuwuh, 1)
2470
VPK(uhum, u16, u8, I, 0)
2471
VPK(uwum, u32, u16, I, 0)
2472
#undef I
2473
#undef VPK
2474
#undef PKBIG
2475

    
2476
#define VROTATE(suffix, element)                                        \
2477
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2478
    {                                                                   \
2479
        int i;                                                          \
2480
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2481
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2482
            unsigned int shift = b->element[i] & mask;                  \
2483
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2484
        }                                                               \
2485
    }
2486
VROTATE(b, u8)
2487
VROTATE(h, u16)
2488
VROTATE(w, u32)
2489
#undef VROTATE
2490

    
2491
/* vsel: bitwise select — take bits from b where c is set, from a
 * where c is clear. */
void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
    r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
}
2496

    
2497
#if defined(WORDS_BIGENDIAN)
2498
#define LEFT 0
2499
#define RIGHT 1
2500
#else
2501
#define LEFT 1
2502
#define RIGHT 0
2503
#endif
2504
/* The specification says that the results are undefined if all of the
2505
 * shift counts are not identical.  We check to make sure that they are
2506
 * to conform to what real hardware appears to do.  */
2507
#define VSHIFT(suffix, leftp)                                           \
2508
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
2509
    {                                                                   \
2510
        int shift = b->u8[LO_IDX*0x15] & 0x7;                           \
2511
        int doit = 1;                                                   \
2512
        int i;                                                          \
2513
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
2514
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
2515
        }                                                               \
2516
        if (doit) {                                                     \
2517
            if (shift == 0) {                                           \
2518
                *r = *a;                                                \
2519
            } else if (leftp) {                                         \
2520
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
2521
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
2522
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
2523
            } else {                                                    \
2524
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
2525
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
2526
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
2527
            }                                                           \
2528
        }                                                               \
2529
    }
2530
VSHIFT(l, LEFT)
2531
VSHIFT(r, RIGHT)
2532
#undef VSHIFT
2533
#undef LEFT
2534
#undef RIGHT
2535

    
2536
#define VSL(suffix, element)                                            \
2537
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2538
    {                                                                   \
2539
        int i;                                                          \
2540
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2541
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2542
            unsigned int shift = b->element[i] & mask;                  \
2543
            r->element[i] = a->element[i] << shift;                     \
2544
        }                                                               \
2545
    }
2546
VSL(b, u8)
2547
VSL(h, u16)
2548
VSL(w, u32)
2549
#undef VSL
2550

    
2551
/* vsldoi: concatenate a:b and shift left by "shift" bytes, keeping
 * the leftmost 16 bytes. */
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int nbytes = shift & 0xf;
    int i;
    ppc_avr_t tmp;

#if defined(WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int k = nbytes + i;
        if (k > 0xf) {
            tmp.u8[i] = b->u8[k-0x10];
        } else {
            tmp.u8[i] = a->u8[k];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int k = (16 - nbytes) + i;
        if (k > 0xf) {
            tmp.u8[i] = a->u8[k-0x10];
        } else {
            tmp.u8[i] = b->u8[k];
        }
    }
#endif
    *r = tmp;
}
2578

    
2579
/* vslo: shift the whole vector left by the byte count held in bits
 * 121:124 of b (last byte in PowerPC order), zero-filling. */
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (WORDS_BIGENDIAN)
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#else
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#endif
}
2591

    
2592
/* Experimental testing shows that hardware masks the immediate.  */
2593
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2594
#if defined(WORDS_BIGENDIAN)
2595
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2596
#else
2597
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2598
#endif
2599
#define VSPLT(suffix, element)                                          \
2600
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2601
    {                                                                   \
2602
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
2603
        int i;                                                          \
2604
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2605
            r->element[i] = s;                                          \
2606
        }                                                               \
2607
    }
2608
VSPLT(b, u8)
2609
VSPLT(h, u16)
2610
VSPLT(w, u32)
2611
#undef VSPLT
2612
#undef SPLAT_ELEMENT
2613
#undef _SPLAT_MASKED
2614

    
2615
#define VSPLTI(suffix, element, splat_type)                     \
2616
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
2617
    {                                                           \
2618
        splat_type x = (int8_t)(splat << 3) >> 3;               \
2619
        int i;                                                  \
2620
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
2621
            r->element[i] = x;                                  \
2622
        }                                                       \
2623
    }
2624
VSPLTI(b, s8, int8_t)
2625
VSPLTI(h, s16, int16_t)
2626
VSPLTI(w, s32, int32_t)
2627
#undef VSPLTI
2628

    
2629
#define VSR(suffix, element)                                            \
2630
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2631
    {                                                                   \
2632
        int i;                                                          \
2633
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2634
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2635
            unsigned int shift = b->element[i] & mask;                  \
2636
            r->element[i] = a->element[i] >> shift;                     \
2637
        }                                                               \
2638
    }
2639
VSR(ab, s8)
2640
VSR(ah, s16)
2641
VSR(aw, s32)
2642
VSR(b, u8)
2643
VSR(h, u16)
2644
VSR(w, u32)
2645
#undef VSR
2646

    
2647
/* vsro: shift the whole 128-bit register right by octets.  The byte count
   comes from bits 121:124 of b (bits 6:3 of the low-order byte); vacated
   bytes are zeroed.  The two memmove/memset arms account for host
   element ordering. */
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (WORDS_BIGENDIAN)
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#else
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#endif
}
/* vsubcuw: per-word carry-out of a - b, i.e. 1 when the unsigned
   subtraction does not borrow (a >= b), else 0. */
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = a->u32[i] >= b->u32[i];
    }
}
/* vsumsws: sum all four signed words of a plus the last word of b in
   64-bit precision, saturate to 32 bits (cvtsdsw), and place the result
   in the last word of r with the other words cleared.  Sets VSCR[SAT]
   on saturation. */
void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
/* vsum2sws: for each doubleword, sum its two signed words of a plus one
   word of b, saturating to a signed word (the inner loop bound
   ARRAY_SIZE(r->u64) == 2 is the words-per-doubleword count).  Sets
   VSCR[SAT] on saturation. */
void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper+i*2];
        result.u64[i] = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2*i+j];
        }
        result.s32[upper+i*2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
/* vsum4sbs: for each word, sum its four signed bytes of a plus the
   corresponding signed word of b, with signed saturation.  Sets
   VSCR[SAT] on saturation. */
void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4*i+j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
/* vsum4shs: for each word, sum its two signed halfwords of a plus the
   corresponding signed word of b, with signed saturation.  Sets
   VSCR[SAT] on saturation. */
void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        t += a->s16[2*i] + a->s16[2*i+1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
/* vsum4ubs: for each word, sum its four unsigned bytes of a plus the
   corresponding unsigned word of b, with unsigned saturation (cvtuduw).
   Sets VSCR[SAT] on saturation. */
void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4*i+j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
#if defined(WORDS_BIGENDIAN)
2772
#define UPKHI 1
2773
#define UPKLO 0
2774
#else
2775
#define UPKHI 0
2776
#define UPKLO 1
2777
#endif
2778
#define VUPKPX(suffix, hi)                                      \
2779
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
2780
    {                                                           \
2781
        int i;                                                  \
2782
        ppc_avr_t result;                                       \
2783
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
2784
            uint16_t e = b->u16[hi ? i : i+4];                  \
2785
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
2786
            uint8_t r = (e >> 10) & 0x1f;                       \
2787
            uint8_t g = (e >> 5) & 0x1f;                        \
2788
            uint8_t b = e & 0x1f;                               \
2789
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
2790
        }                                                               \
2791
        *r = result;                                                    \
2792
    }
2793
VUPKPX(lpx, UPKLO)
2794
VUPKPX(hpx, UPKHI)
2795
#undef VUPKPX
2796

    
2797
#define VUPK(suffix, unpacked, packee, hi)                              \
2798
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
2799
    {                                                                   \
2800
        int i;                                                          \
2801
        ppc_avr_t result;                                               \
2802
        if (hi) {                                                       \
2803
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
2804
                result.unpacked[i] = b->packee[i];                      \
2805
            }                                                           \
2806
        } else {                                                        \
2807
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
2808
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
2809
            }                                                           \
2810
        }                                                               \
2811
        *r = result;                                                    \
2812
    }
2813
VUPK(hsb, s16, s8, UPKHI)
2814
VUPK(hsh, s32, s16, UPKHI)
2815
VUPK(lsb, s16, s8, UPKLO)
2816
VUPK(lsh, s32, s16, UPKLO)
2817
#undef VUPK
2818
#undef UPKHI
2819
#undef UPKLO
2820

    
2821
#undef VECTOR_FOR_INORDER_I
2822
#undef HI_IDX
2823
#undef LO_IDX
2824

    
2825
/*****************************************************************************/
2826
/* SPE extension helpers */
2827
/* Use a table to make this quicker */
2828
static uint8_t hbrev[16] = {
2829
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
2830
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
2831
};
2832

    
2833
static always_inline uint8_t byte_reverse (uint8_t val)
2834
{
2835
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
2836
}
2837

    
2838
static always_inline uint32_t word_reverse (uint32_t val)
2839
{
2840
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
2841
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
2842
}
2843

    
2844
#define MASKBITS 16 // Random value - to be fixed (implementation dependant)
2845
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
2846
{
2847
    uint32_t a, b, d, mask;
2848

    
2849
    mask = UINT32_MAX >> (32 - MASKBITS);
2850
    a = arg1 & mask;
2851
    b = arg2 & mask;
2852
    d = word_reverse(1 + word_reverse(a | ~b));
2853
    return (arg1 & ~mask) | (d & b);
2854
}
2855

    
2856
/* Count leading sign bits of a 32-bit word: leading ones when negative,
   leading zeros otherwise. */
uint32_t helper_cntlsw32 (uint32_t val)
{
    if (val & 0x80000000)
        return clz32(~val);
    else
        return clz32(val);
}

/* Count leading zeros of a 32-bit word. */
uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}
/* Single-precision floating-point conversions */
2870
static always_inline uint32_t efscfsi (uint32_t val)
2871
{
2872
    CPU_FloatU u;
2873

    
2874
    u.f = int32_to_float32(val, &env->vec_status);
2875

    
2876
    return u.l;
2877
}
2878

    
2879
static always_inline uint32_t efscfui (uint32_t val)
2880
{
2881
    CPU_FloatU u;
2882

    
2883
    u.f = uint32_to_float32(val, &env->vec_status);
2884

    
2885
    return u.l;
2886
}
2887

    
2888
static always_inline int32_t efsctsi (uint32_t val)
2889
{
2890
    CPU_FloatU u;
2891

    
2892
    u.l = val;
2893
    /* NaN are not treated the same way IEEE 754 does */
2894
    if (unlikely(float32_is_nan(u.f)))
2895
        return 0;
2896

    
2897
    return float32_to_int32(u.f, &env->vec_status);
2898
}
2899

    
2900
static always_inline uint32_t efsctui (uint32_t val)
2901
{
2902
    CPU_FloatU u;
2903

    
2904
    u.l = val;
2905
    /* NaN are not treated the same way IEEE 754 does */
2906
    if (unlikely(float32_is_nan(u.f)))
2907
        return 0;
2908

    
2909
    return float32_to_uint32(u.f, &env->vec_status);
2910
}
2911

    
2912
static always_inline uint32_t efsctsiz (uint32_t val)
2913
{
2914
    CPU_FloatU u;
2915

    
2916
    u.l = val;
2917
    /* NaN are not treated the same way IEEE 754 does */
2918
    if (unlikely(float32_is_nan(u.f)))
2919
        return 0;
2920

    
2921
    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
2922
}
2923

    
2924
static always_inline uint32_t efsctuiz (uint32_t val)
2925
{
2926
    CPU_FloatU u;
2927

    
2928
    u.l = val;
2929
    /* NaN are not treated the same way IEEE 754 does */
2930
    if (unlikely(float32_is_nan(u.f)))
2931
        return 0;
2932

    
2933
    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
2934
}
2935

    
2936
static always_inline uint32_t efscfsf (uint32_t val)
2937
{
2938
    CPU_FloatU u;
2939
    float32 tmp;
2940

    
2941
    u.f = int32_to_float32(val, &env->vec_status);
2942
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
2943
    u.f = float32_div(u.f, tmp, &env->vec_status);
2944

    
2945
    return u.l;
2946
}
2947

    
2948
static always_inline uint32_t efscfuf (uint32_t val)
2949
{
2950
    CPU_FloatU u;
2951
    float32 tmp;
2952

    
2953
    u.f = uint32_to_float32(val, &env->vec_status);
2954
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
2955
    u.f = float32_div(u.f, tmp, &env->vec_status);
2956

    
2957
    return u.l;
2958
}
2959

    
2960
static always_inline uint32_t efsctsf (uint32_t val)
2961
{
2962
    CPU_FloatU u;
2963
    float32 tmp;
2964

    
2965
    u.l = val;
2966
    /* NaN are not treated the same way IEEE 754 does */
2967
    if (unlikely(float32_is_nan(u.f)))
2968
        return 0;
2969
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
2970
    u.f = float32_mul(u.f, tmp, &env->vec_status);
2971

    
2972
    return float32_to_int32(u.f, &env->vec_status);
2973
}
2974

    
2975
static always_inline uint32_t efsctuf (uint32_t val)
2976
{
2977
    CPU_FloatU u;
2978
    float32 tmp;
2979

    
2980
    u.l = val;
2981
    /* NaN are not treated the same way IEEE 754 does */
2982
    if (unlikely(float32_is_nan(u.f)))
2983
        return 0;
2984
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
2985
    u.f = float32_mul(u.f, tmp, &env->vec_status);
2986

    
2987
    return float32_to_uint32(u.f, &env->vec_status);
2988
}
2989

    
2990
/* Emit the public scalar wrappers (helper_efXXX) around the inline
   single-precision conversion primitives above. */
#define HELPER_SPE_SINGLE_CONV(name)                                          \
uint32_t helper_e##name (uint32_t val)                                        \
{                                                                             \
    return e##name(val);                                                      \
}
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
/* Emit the vector (two-lane) wrappers: apply the scalar conversion to
   the high and low 32-bit halves of a 64-bit SPE register. */
#define HELPER_SPE_VECTOR_CONV(name)                                          \
uint64_t helper_ev##name (uint64_t val)                                       \
{                                                                             \
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
            (uint64_t)e##name(val);                                           \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
/* Single-precision floating-point arithmetic */
3044
static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
3045
{
3046
    CPU_FloatU u1, u2;
3047
    u1.l = op1;
3048
    u2.l = op2;
3049
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3050
    return u1.l;
3051
}
3052

    
3053
static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
3054
{
3055
    CPU_FloatU u1, u2;
3056
    u1.l = op1;
3057
    u2.l = op2;
3058
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3059
    return u1.l;
3060
}
3061

    
3062
static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
3063
{
3064
    CPU_FloatU u1, u2;
3065
    u1.l = op1;
3066
    u2.l = op2;
3067
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3068
    return u1.l;
3069
}
3070

    
3071
static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
3072
{
3073
    CPU_FloatU u1, u2;
3074
    u1.l = op1;
3075
    u2.l = op2;
3076
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3077
    return u1.l;
3078
}
3079

    
3080
/* Scalar wrappers for the single-precision arithmetic primitives. */
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

/* Vector wrappers: apply the scalar op independently to the high and
   low 32-bit lanes of the 64-bit operands. */
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
            (uint64_t)e##name(op1, op2);                                      \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
/* Single-precision floating-point comparisons */
3110
static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
3111
{
3112
    CPU_FloatU u1, u2;
3113
    u1.l = op1;
3114
    u2.l = op2;
3115
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3116
}
3117

    
3118
static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
3119
{
3120
    CPU_FloatU u1, u2;
3121
    u1.l = op1;
3122
    u2.l = op2;
3123
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
3124
}
3125

    
3126
static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
3127
{
3128
    CPU_FloatU u1, u2;
3129
    u1.l = op1;
3130
    u2.l = op2;
3131
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3132
}
3133

    
3134
static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
3135
{
3136
    /* XXX: TODO: test special values (NaN, infinites, ...) */
3137
    return efststlt(op1, op2);
3138
}
3139

    
3140
static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
3141
{
3142
    /* XXX: TODO: test special values (NaN, infinites, ...) */
3143
    return efststgt(op1, op2);
3144
}
3145

    
3146
static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
3147
{
3148
    /* XXX: TODO: test special values (NaN, infinites, ...) */
3149
    return efststeq(op1, op2);
3150
}
3151

    
3152
#define HELPER_SINGLE_SPE_CMP(name)                                           \
3153
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
3154
{                                                                             \
3155
    return e##name(op1, op2) << 2;                                            \
3156
}
3157
/* efststlt */
3158
HELPER_SINGLE_SPE_CMP(fststlt);
3159
/* efststgt */
3160
HELPER_SINGLE_SPE_CMP(fststgt);
3161
/* efststeq */
3162
HELPER_SINGLE_SPE_CMP(fststeq);
3163
/* efscmplt */
3164
HELPER_SINGLE_SPE_CMP(fscmplt);
3165
/* efscmpgt */
3166
HELPER_SINGLE_SPE_CMP(fscmpgt);
3167
/* efscmpeq */
3168
HELPER_SINGLE_SPE_CMP(fscmpeq);
3169

    
3170
static always_inline uint32_t evcmp_merge (int t0, int t1)
3171
{
3172
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
3173
}
3174

    
3175
/* Vector comparison wrappers: compare both 32-bit lanes and merge the
   results with evcmp_merge. */
#define HELPER_VECTOR_SPE_CMP(name)                                           \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
/* Double-precision floating-point conversion */
3194
uint64_t helper_efdcfsi (uint32_t val)
3195
{
3196
    CPU_DoubleU u;
3197

    
3198
    u.d = int32_to_float64(val, &env->vec_status);
3199

    
3200
    return u.ll;
3201
}
3202

    
3203
uint64_t helper_efdcfsid (uint64_t val)
3204
{
3205
    CPU_DoubleU u;
3206

    
3207
    u.d = int64_to_float64(val, &env->vec_status);
3208

    
3209
    return u.ll;
3210
}
3211

    
3212
uint64_t helper_efdcfui (uint32_t val)
3213
{
3214
    CPU_DoubleU u;
3215

    
3216
    u.d = uint32_to_float64(val, &env->vec_status);
3217

    
3218
    return u.ll;
3219
}
3220

    
3221
uint64_t helper_efdcfuid (uint64_t val)
3222
{
3223
    CPU_DoubleU u;
3224

    
3225
    u.d = uint64_to_float64(val, &env->vec_status);
3226

    
3227
    return u.ll;
3228
}
3229

    
3230
uint32_t helper_efdctsi (uint64_t val)
3231
{
3232
    CPU_DoubleU u;
3233

    
3234
    u.ll = val;
3235
    /* NaN are not treated the same way IEEE 754 does */
3236
    if (unlikely(float64_is_nan(u.d)))
3237
        return 0;
3238

    
3239
    return float64_to_int32(u.d, &env->vec_status);
3240
}
3241

    
3242
uint32_t helper_efdctui (uint64_t val)
3243
{
3244
    CPU_DoubleU u;
3245

    
3246
    u.ll = val;
3247
    /* NaN are not treated the same way IEEE 754 does */
3248
    if (unlikely(float64_is_nan(u.d)))
3249
        return 0;
3250

    
3251
    return float64_to_uint32(u.d, &env->vec_status);
3252
}
3253

    
3254
uint32_t helper_efdctsiz (uint64_t val)
3255
{
3256
    CPU_DoubleU u;
3257

    
3258
    u.ll = val;
3259
    /* NaN are not treated the same way IEEE 754 does */
3260
    if (unlikely(float64_is_nan(u.d)))
3261
        return 0;
3262

    
3263
    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
3264
}
3265

    
3266
uint64_t helper_efdctsidz (uint64_t val)
3267
{
3268
    CPU_DoubleU u;
3269

    
3270
    u.ll = val;
3271
    /* NaN are not treated the same way IEEE 754 does */
3272
    if (unlikely(float64_is_nan(u.d)))
3273
        return 0;
3274

    
3275
    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
3276
}
3277

    
3278
uint32_t helper_efdctuiz (uint64_t val)
3279
{
3280
    CPU_DoubleU u;
3281

    
3282
    u.ll = val;
3283
    /* NaN are not treated the same way IEEE 754 does */
3284
    if (unlikely(float64_is_nan(u.d)))
3285
        return 0;
3286

    
3287
    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
3288
}
3289

    
3290
uint64_t helper_efdctuidz (uint64_t val)
3291
{
3292
    CPU_DoubleU u;
3293

    
3294
    u.ll = val;
3295
    /* NaN are not treated the same way IEEE 754 does */
3296
    if (unlikely(float64_is_nan(u.d)))
3297
        return 0;
3298

    
3299
    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
3300
}
3301

    
3302
uint64_t helper_efdcfsf (uint32_t val)
3303
{
3304
    CPU_DoubleU u;
3305
    float64 tmp;
3306

    
3307
    u.d = int32_to_float64(val, &env->vec_status);
3308
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3309
    u.d = float64_div(u.d, tmp, &env->vec_status);
3310

    
3311
    return u.ll;
3312
}
3313

    
3314
uint64_t helper_efdcfuf (uint32_t val)
3315
{
3316
    CPU_DoubleU u;
3317
    float64 tmp;
3318

    
3319
    u.d = uint32_to_float64(val, &env->vec_status);
3320
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3321
    u.d = float64_div(u.d, tmp, &env->vec_status);
3322

    
3323
    return u.ll;
3324
}
3325

    
3326
uint32_t helper_efdctsf (uint64_t val)
3327
{
3328
    CPU_DoubleU u;
3329
    float64 tmp;
3330

    
3331
    u.ll = val;
3332
    /* NaN are not treated the same way IEEE 754 does */
3333
    if (unlikely(float64_is_nan(u.d)))
3334
        return 0;
3335
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3336
    u.d = float64_mul(u.d, tmp, &env->vec_status);
3337

    
3338
    return float64_to_int32(u.d, &env->vec_status);
3339
}
3340

    
3341
uint32_t helper_efdctuf (uint64_t val)
3342
{
3343
    CPU_DoubleU u;
3344
    float64 tmp;
3345

    
3346
    u.ll = val;
3347
    /* NaN are not treated the same way IEEE 754 does */
3348
    if (unlikely(float64_is_nan(u.d)))
3349
        return 0;
3350
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3351
    u.d = float64_mul(u.d, tmp, &env->vec_status);
3352

    
3353
    return float64_to_uint32(u.d, &env->vec_status);
3354
}
3355

    
3356
uint32_t helper_efscfd (uint64_t val)
3357
{
3358
    CPU_DoubleU u1;
3359
    CPU_FloatU u2;
3360

    
3361
    u1.ll = val;
3362
    u2.f = float64_to_float32(u1.d, &env->vec_status);
3363

    
3364
    return u2.l;
3365
}
3366

    
3367
uint64_t helper_efdcfs (uint32_t val)
3368
{
3369
    CPU_DoubleU u2;
3370
    CPU_FloatU u1;
3371

    
3372
    u1.l = val;
3373
    u2.d = float32_to_float64(u1.f, &env->vec_status);
3374

    
3375
    return u2.ll;
3376
}
3377

    
3378
/* Double precision fixed-point arithmetic */
3379
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
3380
{
3381
    CPU_DoubleU u1, u2;
3382
    u1.ll = op1;
3383
    u2.ll = op2;
3384
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
3385
    return u1.ll;
3386
}
3387

    
3388
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
3389
{
3390
    CPU_DoubleU u1, u2;
3391
    u1.ll = op1;
3392
    u2.ll = op2;
3393
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
3394
    return u1.ll;
3395
}
3396

    
3397
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
3398
{
3399
    CPU_DoubleU u1, u2;
3400
    u1.ll = op1;
3401
    u2.ll = op2;
3402
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
3403
    return u1.ll;
3404
}
3405

    
3406
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
3407
{
3408
    CPU_DoubleU u1, u2;
3409
    u1.ll = op1;
3410
    u2.ll = op2;
3411
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
3412
    return u1.ll;
3413
}
3414

    
3415
/* Double precision floating point helpers */
3416
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
3417
{
3418
    CPU_DoubleU u1, u2;
3419
    u1.ll = op1;
3420
    u2.ll = op2;
3421
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3422
}
3423

    
3424
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
3425
{
3426
    CPU_DoubleU u1, u2;
3427
    u1.ll = op1;
3428
    u2.ll = op2;
3429
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
3430
}
3431

    
3432
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
3433
{
3434
    CPU_DoubleU u1, u2;
3435
    u1.ll = op1;
3436
    u2.ll = op2;
3437
    return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3438
}
3439

    
3440
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
3441
{
3442
    /* XXX: TODO: test special values (NaN, infinites, ...) */
3443
    return helper_efdtstlt(op1, op2);
3444
}
3445

    
3446
uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
3447
{
3448
    /* XXX: TODO: test special values (NaN, infinites, ...) */
3449
    return helper_efdtstgt(op1, op2);
3450
}
3451

    
3452
uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
3453
{
3454
    /* XXX: TODO: test special values (NaN, infinites, ...) */
3455
    return helper_efdtsteq(op1, op2);
3456
}
3457

    
3458
/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

/* Instantiate the softmmu load/store templates for 1-, 2-, 4- and
   8-byte accesses (SHIFT is log2 of the access size). */
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
3477
   NULL, it means that the function was called in C code (i.e. not
3478
   from generated code or from helper.c) */
3479
/* XXX: fix it to restore all registers */
3480
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
3481
{
3482
    TranslationBlock *tb;
3483
    CPUState *saved_env;
3484
    unsigned long pc;
3485
    int ret;
3486

    
3487
    /* XXX: hack to restore env in all cases, even if not called from
3488
       generated code */
3489
    saved_env = env;
3490
    env = cpu_single_env;
3491
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3492
    if (unlikely(ret != 0)) {
3493
        if (likely(retaddr)) {
3494
            /* now we have a real cpu fault */
3495
            pc = (unsigned long)retaddr;
3496
            tb = tb_find_pc(pc);
3497
            if (likely(tb)) {
3498
                /* the PC is inside the translated code. It means that we have
3499
                   a virtual CPU fault */
3500
                cpu_restore_state(tb, env, pc, NULL);
3501
            }
3502
        }
3503
        helper_raise_exception_err(env->exception_index, env->error_code);
3504
    }
3505
    env = saved_env;
3506
}
3507

    
3508
/* Segment registers load and store */
3509
target_ulong helper_load_sr (target_ulong sr_num)
3510
{
3511
    return env->sr[sr_num];
3512
}
3513

    
3514
void helper_store_sr (target_ulong sr_num, target_ulong val)
3515
{
3516
    ppc_store_sr(env, sr_num, val);
3517
}
3518

    
3519
/* SLB management */
#if defined(TARGET_PPC64)
/* Thin wrappers delegating 64-bit SLB operations to the MMU code. */
target_ulong helper_load_slb (target_ulong slb_nr)
{
    return ppc_load_slb(env, slb_nr);
}

void helper_store_slb (target_ulong slb_nr, target_ulong rs)
{
    ppc_store_slb(env, slb_nr, rs);
}

/* slbia: invalidate all SLB entries. */
void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}

/* slbie: invalidate the SLB entry translating addr. */
void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}

#endif /* defined(TARGET_PPC64) */
/* TLB management */
3544
void helper_tlbia (void)
3545
{
3546
    ppc_tlb_invalidate_all(env);
3547
}
3548

    
3549
void helper_tlbie (target_ulong addr)
3550
{
3551
    ppc_tlb_invalidate_one(env, addr);
3552
}
3553

    
3554
/* Software driven TLBs management */
3555
/* PowerPC 602/603 software TLB load instructions helpers */
3556
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
3557
{
3558
    target_ulong RPN, CMP, EPN;
3559
    int way;
3560

    
3561
    RPN = env->spr[SPR_RPA];
3562
    if (is_code) {
3563
        CMP = env->spr[SPR_ICMP];
3564
        EPN = env->spr[SPR_IMISS];
3565
    } else {
3566
        CMP = env->spr[SPR_DCMP];
3567
        EPN = env->spr[SPR_DMISS];
3568
    }
3569
    way = (env->spr[SPR_SRR1] >> 17) & 1;
3570
    LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3571
                " PTE1 " ADDRX " way %d\n",
3572
                __func__, new_EPN, EPN, CMP, RPN, way);
3573
    /* Store this TLB */
3574
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3575
                     way, is_code, CMP, RPN);
3576
}
3577

    
3578
void helper_6xx_tlbd (target_ulong EPN)
3579
{
3580
    do_6xx_tlb(EPN, 0);
3581
}
3582

    
3583
void helper_6xx_tlbi (target_ulong EPN)
3584
{
3585
    do_6xx_tlb(EPN, 1);
3586
}
3587

    
3588
/* PowerPC 74xx software TLB load instructions helpers */
3589
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
3590
{
3591
    target_ulong RPN, CMP, EPN;
3592
    int way;
3593

    
3594
    RPN = env->spr[SPR_PTELO];
3595
    CMP = env->spr[SPR_PTEHI];
3596
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
3597
    way = env->spr[SPR_TLBMISS] & 0x3;
3598
    LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3599
                " PTE1 " ADDRX " way %d\n",
3600
                __func__, new_EPN, EPN, CMP, RPN, way);
3601
    /* Store this TLB */
3602
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3603
                     way, is_code, CMP, RPN);
3604
}
3605

    
3606
void helper_74xx_tlbd (target_ulong EPN)
3607
{
3608
    do_74xx_tlb(EPN, 0);
3609
}
3610

    
3611
void helper_74xx_tlbi (target_ulong EPN)
3612
{
3613
    do_74xx_tlb(EPN, 1);
3614
}
3615

    
3616
static always_inline target_ulong booke_tlb_to_page_size (int size)
3617
{
3618
    return 1024 << (2 * size);
3619
}
3620

    
3621
/* Encode a page size in bytes (1 KiB * 4^n) back into its BookE TLB size
 * field.  Returns -1 when the size is not one of the architected values.
 */
static always_inline int booke_page_size_to_tlb (target_ulong page_size)
{
    static const target_ulong tlb_sizes[] = {
        0x00000400UL,       /* 0x0:   1 KiB */
        0x00001000UL,       /* 0x1:   4 KiB */
        0x00004000UL,       /* 0x2:  16 KiB */
        0x00010000UL,       /* 0x3:  64 KiB */
        0x00040000UL,       /* 0x4: 256 KiB */
        0x00100000UL,       /* 0x5:   1 MiB */
        0x00400000UL,       /* 0x6:   4 MiB */
        0x01000000UL,       /* 0x7:  16 MiB */
        0x04000000UL,       /* 0x8:  64 MiB */
        0x10000000UL,       /* 0x9: 256 MiB */
        0x40000000UL,       /* 0xA:   1 GiB */
#if defined (TARGET_PPC64)
        0x000100000000ULL,  /* 0xB:   4 GiB */
        0x000400000000ULL,  /* 0xC:  16 GiB */
        0x001000000000ULL,  /* 0xD:  64 GiB */
        0x004000000000ULL,  /* 0xE: 256 GiB */
        0x010000000000ULL,  /* 0xF:   1 TiB */
#endif
    };
    int size;

    for (size = 0; size < (int)(sizeof(tlb_sizes) / sizeof(tlb_sizes[0]));
         size++) {
        if (tlb_sizes[size] == page_size)
            return size;
    }

    return -1;
}

/* Helpers for 4xx TLB management */
/* tlbre: read back the EPN word of 4xx TLB entry 'entry':
 * effective page number | valid bit (0x400) | encoded size (bits 9-7).
 * As a side effect the entry's PID is copied into SPR 40x_PID.
 * NOTE(review): despite the _lo suffix this returns the EPN/size/valid
 * word — confirm against the 40x TLBHI/TLBLO layout and the translate.c
 * call sites.
 */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;                      /* 64 TLB entries */
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID)
        ret |= 0x400;
    size = booke_page_size_to_tlb(tlb->size);
    if (size < 0 || size > 0x7)
        size = 1;                       /* unencodable: report 4 KiB */
    ret |= size << 7;
    env->spr[SPR_40x_PID] = tlb->PID;   /* architected side effect */
    return ret;
}

/* tlbre: read back the RPN word of 4xx TLB entry 'entry', with the
 * execute (0x200) and write (0x100) permission bits merged in.
 */
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb = &env->tlb[entry & 0x3F].tlbe;
    target_ulong ret = tlb->RPN;

    if (tlb->prot & PAGE_EXEC)
        ret |= 0x200;
    if (tlb->prot & PAGE_WRITE)
        ret |= 0x100;

    return ret;
}

/* tlbwe: write the EPN word of 4xx TLB entry 'entry': effective page
 * number, encoded size (bits 9-7), valid (0x40) and little-endian (0x20)
 * bits.  QEMU's shadow TLB pages covered by the old mapping are flushed
 * before the update and those of the new mapping after it, so stale
 * translations cannot survive.
 */
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
    entry &= 0x3F;                      /* 64 TLB entries */
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " ADDRX
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
    /* Size field encodes 1 KiB * 4^n */
    tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    /* EPN is naturally aligned to the page size */
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & 0x40)                     /* valid bit */
        tlb->prot |= PAGE_VALID;
    else
        tlb->prot &= ~PAGE_VALID;
    if (val & 0x20) {
        /* XXX: TO BE FIXED */
        cpu_abort(env, "Little-endian TLB entries are not supported by now\n");
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    tlb->attr = val & 0xFF;             /* storage attribute bits */
    LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " ADDRX
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
}

/* tlbwe: write the RPN word of 4xx TLB entry 'entry': physical page
 * number plus execute (0x200) and write (0x100) permission bits.
 * Read permission is always granted; note that the valid bit is owned
 * by the EPN word and is reset here, exactly as before.
 */
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    int prot;

    LOG_SWTLB("%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
    entry &= 0x3F;                      /* 64 TLB entries */
    tlb = &env->tlb[entry].tlbe;
    tlb->RPN = val & 0xFFFFFC00;
    /* Rebuild the permission set from scratch */
    prot = PAGE_READ;
    if (val & 0x200)
        prot |= PAGE_EXEC;
    if (val & 0x100)
        prot |= PAGE_WRITE;
    tlb->prot = prot;
    LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}

/* tlbsx: search the 4xx TLB for 'address' under the current process ID
 * (SPR 40x_PID); result semantics are those of ppcemb_tlb_search
 * (presumably the matching entry index, or -1 on miss — see mmu code).
 */
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}

/* PowerPC 440 TLB management */
/* tlbwe: write one of the three words of 440 TLB entry 'entry'.
 * word 0: EPN, size, attribute bit 0, valid bit (PID taken from MMUCR);
 * word 1: RPN; word 2: storage attributes and user/supervisor
 * permissions.  QEMU's shadow TLB is flushed whenever an update could
 * leave stale translations behind.
 */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " ADDRX "\n",
                __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;                      /* 64 TLB entries */
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        /* Word 0: EPN, size, attribute bit 0, valid bit */
        EPN = value & 0xFFFFFC00;
        /* Remapping a live entry requires a flush */
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        /* Shrinking a live mapping can orphan shadow pages: flush */
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {            /* valid bit */
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        /* PID comes from the low byte of MMUCR, not from the operand */
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        /* Word 1: real page number */
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        /* Word 2: storage attributes plus permissions; user permissions
         * are kept shifted left by 4, supervisor ones unshifted */
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;   /* keep only the valid bit */
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}

/* tlbre: read back one of the three words of 440 TLB entry 'entry',
 * mirroring the encoding used by helper_440_tlbwe.  Reading word 0 also
 * deposits the entry's PID into the low byte of MMUCR.
 */
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;                      /* 64 TLB entries */
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        /* Word 0: EPN | size (bits 7-4) | attr bit 0 (0x100) | valid (0x200) */
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;                   /* unencodable: report 4 KiB */
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        /* Side effect: expose the entry's PID through MMUCR */
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        /* Word 1: real page number */
        ret = tlb->RPN;
        break;
    case 2:
        /* Word 2: attributes plus user (<<4) and supervisor permissions */
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}

/* tlbsx: search the 440 TLB for 'address' under the PID currently held
 * in the low byte of MMUCR; result semantics are those of
 * ppcemb_tlb_search (presumably entry index or -1 on miss).
 */
target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

#endif /* !CONFIG_USER_ONLY */