Statistics
| Branch: | Revision:

root / target-ppc / op_helper.c @ fbd265b6

History | View | Annotate | Download (113.1 kB)

1
/*
2
 *  PowerPC emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2003-2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19
 */
20
#include <string.h>
21
#include "exec.h"
22
#include "host-utils.h"
23
#include "helper.h"
24

    
25
#include "helper_regs.h"
26

    
27
//#define DEBUG_OP
28
//#define DEBUG_EXCEPTIONS
29
//#define DEBUG_SOFTWARE_TLB
30

    
31
#ifdef DEBUG_SOFTWARE_TLB
32
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
33
#else
34
#  define LOG_SWTLB(...) do { } while (0)
35
#endif
36

    
37

    
38
/*****************************************************************************/
39
/* Exceptions processing helpers */
40

    
41
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
42
{
43
#if 0
44
    printf("Raise exception %3x code : %d\n", exception, error_code);
45
#endif
46
    env->exception_index = exception;
47
    env->error_code = error_code;
48
    cpu_loop_exit();
49
}
50

    
51
/* Raise an exception that carries no error code. */
void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
55

    
56
/*****************************************************************************/
57
/* Registers load and stores */
58
target_ulong helper_load_cr (void)
59
{
60
    return (env->crf[0] << 28) |
61
           (env->crf[1] << 24) |
62
           (env->crf[2] << 20) |
63
           (env->crf[3] << 16) |
64
           (env->crf[4] << 12) |
65
           (env->crf[5] << 8) |
66
           (env->crf[6] << 4) |
67
           (env->crf[7] << 0);
68
}
69

    
70
/* Scatter selected 4-bit fields of VAL into crf[]; mask bit (7 - i)
 * guards crf[i], matching the mtcrf instruction's FXM field. */
void helper_store_cr (target_ulong val, uint32_t mask)
{
    int i;

    for (i = 0; i < 8; i++) {
        int sh = 7 - i;

        if (mask & (1 << sh)) {
            env->crf[i] = (val >> (sh * 4)) & 0xFUL;
        }
    }
}
79

    
80
/*****************************************************************************/
81
/* SPR accesses */
82
/* Trace helper: log a read of SPR <sprn> and the value obtained. */
void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " ADDRX "\n",
                sprn, sprn, env->spr[sprn]);
}

/* Trace helper: log a write to SPR <sprn> and the value just stored. */
void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " ADDRX "\n",
                sprn, sprn, env->spr[sprn]);
}
93

    
94
/* Time base lower half (TBL). */
target_ulong helper_load_tbl (void)
{
    return cpu_ppc_load_tbl(env);
}

/* Time base upper half (TBU). */
target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

/* Alternate time base lower half (ATBL). */
target_ulong helper_load_atbl (void)
{
    return cpu_ppc_load_atbl(env);
}

/* Alternate time base upper half (ATBU). */
target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

/* PowerPC 601 real-time clock, lower half (RTCL). */
target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

/* PowerPC 601 real-time clock, upper half (RTCU). */
target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
123

    
124
#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
/* Address space register (64-bit implementations only). */
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

/* SDR1: hash page table base/size register. */
void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

/* Time base lower half (mttbl). */
void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

/* Time base upper half (mttbu). */
void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

/* Alternate time base lower half. */
void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

/* Alternate time base upper half. */
void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

/* PowerPC 601 real-time clock, lower half. */
void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

/* PowerPC 601 real-time clock, upper half. */
void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

/* Decrementer register read. */
target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

/* Decrementer register write. */
void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}
176

    
177
/* Write HID0 on the PowerPC 601.  Bit 3 (0x8) of HID0 selects
 * little-endian mode on this CPU; when it toggles, the cached LE flag
 * in hflags/hflags_nmsr is rebuilt so translation picks up the new
 * endianness.  The SPR itself is stored truncated to 32 bits. */
void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        /* Copy HID0 bit 3 into the non-MSR LE hflag, then fold the
         * non-MSR flags back into hflags. */
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " ADDRX "\n",
                    __func__, val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}
193

    
194
/* Write a PowerPC 403 protection bound register.  Since the PBRs change
 * the memory protection layout, the TLB is flushed when the value
 * actually changes. */
void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}
202

    
203
/* PowerPC 40x programmable interval timer read. */
target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

/* PowerPC 40x programmable interval timer write. */
void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

/* PowerPC 40x debug control register 0. */
void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

/* PowerPC 40x storage little-endian register. */
void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

/* BookE timer control register. */
void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

/* BookE timer status register. */
void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

/* Instruction BAT upper half <nr>. */
void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

/* Instruction BAT lower half <nr>. */
void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

/* Data BAT upper half <nr>. */
void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

/* Data BAT lower half <nr>. */
void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

/* PowerPC 601 unified BAT lower half <nr> (601 has no split I/D BATs). */
void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

/* PowerPC 601 unified BAT upper half <nr>. */
void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif
263

    
264
/*****************************************************************************/
265
/* Memory load and stores */
266

    
267
/* Compute addr + arg for string/multiple memory ops, truncating the
 * result to 32 bits when a 64-bit CPU runs in 32-bit mode (MSR[SF]
 * clear). */
static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
{
    target_ulong next = addr + arg;

#if defined(TARGET_PPC64)
    if (!msr_sf) {
        next = (uint32_t)next;
    }
#endif
    return next;
}
276

    
277
/* lmw: load GPRs reg..31 from consecutive words at addr, byte-swapping
 * each word when the CPU is in little-endian mode. */
void helper_lmw (target_ulong addr, uint32_t reg)
{
    uint32_t r;

    for (r = reg; r < 32; r++) {
        uint32_t w = ldl(addr);

        env->gpr[r] = msr_le ? bswap32(w) : w;
        addr = addr_add(addr, 4);
    }
}
287

    
288
/* stmw: store GPRs reg..31 to consecutive words at addr, byte-swapping
 * each word when the CPU is in little-endian mode. */
void helper_stmw (target_ulong addr, uint32_t reg)
{
    uint32_t r;

    for (r = reg; r < 32; r++) {
        uint32_t w = (uint32_t)env->gpr[r];

        stl(addr, msr_le ? bswap32(w) : w);
        addr = addr_add(addr, 4);
    }
}
298

    
299
/* lswi/lswx backend: load nb bytes starting at addr into successive
 * registers (wrapping from r31 to r0); a final partial word is packed
 * left-justified with the unused low bytes cleared. */
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    while (nb > 3) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) & 0x1F;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    if (nb > 0) {
        env->gpr[reg] = 0;
        sh = 24;
        while (nb > 0) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
            sh -= 8;
            nb--;
        }
    }
}
315
/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
/* lswx: load XER[bc] bytes starting at addr into registers beginning at
 * reg; raises a program exception when that register range would cover
 * ra (if non-zero) or rb.  A zero byte count is a no-op.
 * NOTE(review): the overlap test compares reg + xer_bc linearly and so
 * does not account for the register range wrapping past r31 (helper_lsw
 * does wrap) -- confirm against the ISA. */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
333

    
334
/* stswi/stswx backend: store nb bytes from successive registers
 * (wrapping from r31 to r0) to addr; a final partial word is emitted
 * most-significant byte first. */
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    while (nb > 3) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) & 0x1F;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    sh = 24;
    while (nb > 0) {
        stb(addr, (env->gpr[reg] >> sh) & 0xFF);
        addr = addr_add(addr, 1);
        sh -= 8;
        nb--;
    }
}
349

    
350
/* Zero one data cache line of the given size at addr and drop any
 * lwarx/ldarx reservation that covered that line. */
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int ofs;

    addr &= ~(dcache_line_size - 1);
    for (ofs = 0; ofs < dcache_line_size; ofs += 4) {
        stl(addr + ofs, 0);
    }
    if (env->reserve == addr) {
        env->reserve = (target_ulong)-1ULL;
    }
}
360

    
361
/* dcbz: zero one data cache line using the CPU's configured line size. */
void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

/* dcbz on the PPC970: when HID5 bits 7-8 equal 1, dcbz clears a 32-byte
 * line; otherwise the regular configured line size is used. */
void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}

/* icbi: invalidate any translated code derived from one cache line. */
void helper_icbi(target_ulong addr)
{
    uint32_t tmp;

    /* NOTE(review): the address is aligned using dcache_line_size while
     * the invalidated range below uses icache_line_size -- looks
     * inconsistent unless the two are always equal; confirm. */
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    /* tmp is deliberately unused: the load exists only for its MMU side
     * effects (permission check / TLB fill). */
    tmp = ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}
387

    
388
// XXX: to be tested
/* lscbx (POWER/601): load up to XER[bc] bytes into successive registers
 * (wrapping at r31), packing bytes MSB-first, and stop as soon as a
 * loaded byte equals XER[cmp].  Registers ra (unless ra == 0) and rb
 * are skipped, never overwritten.  Returns the loop index at exit;
 * note the matching byte is stored but not counted (break precedes
 * the i++). */
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;
    d = 24;     /* bit position of the next byte inside the target GPR */
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            /* Register full: move on to the next one, wrapping at r31. */
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
412

    
413
/*****************************************************************************/
414
/* Fixed point operations helpers */
415
#if defined(TARGET_PPC64)

/* mulhd: upper 64 bits of the signed 128-bit product. */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* mulhdu: upper 64 bits of the unsigned 128-bit product. */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

/* mulldo: low 64 bits of the signed product; sets XER[OV] (and SO)
 * when the 128-bit result does not fit in 64 bits. */
uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    /* NOTE(review): this only checks th is 0 or -1; it does not verify
     * that th matches the sign bit of tl, so some overflows may be
     * missed -- confirm against the ISA's OV definition. */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif

/* cntlzw: count leading zeros in the low 32 bits. */
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
/* cntlzd: count leading zeros in 64 bits. */
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif
462

    
463
/* shift right arithmetic helper */
464
/* sraw: 32-bit shift right algebraic, also computing XER[CA].
 * CA is set iff the source is negative and at least one 1 bit was
 * shifted out.  A shift amount with bit 0x20 set yields 32 copies of
 * the sign bit.
 */
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            /* Build the shifted-out mask with an unsigned constant:
             * "1 << 31" would be signed-overflow UB. */
            if (likely(ret >= 0 || (value & ((1U << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            /* Zero shift: value unchanged, CA cleared. */
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        /* Shift >= 32: result is all sign bits; CA set iff negative. */
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}
491

    
492
#if defined(TARGET_PPC64)
/* srad: 64-bit shift right algebraic, also computing XER[CA].
 * CA is set iff the source is negative and at least one 1 bit was
 * shifted out.  A shift amount with bit 0x40 set yields 64 copies of
 * the sign bit.
 */
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* The shifted-out mask must be built in 64 bits: the old
             * "1 << shift" was an int shift, undefined (and wrong) for
             * shift >= 32, so CA was mis-computed for large counts. */
            if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            /* Zero shift: value unchanged, CA cleared. */
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        /* Shift >= 64: result is all sign bits; CA set iff negative. */
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif
521

    
522
/* popcntb: per-byte population count.  Each byte of the result holds
 * the number of 1 bits in the corresponding byte of val (SWAR method,
 * stopped after the 4-bit stage so each count stays within its byte). */
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
/* 64-bit popcntb: per-byte population count over all 8 bytes. */
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif
539

    
540
/*****************************************************************************/
541
/* Floating point operations helpers */
542
/* Widen a single-precision image to double precision via softfloat,
 * using (and possibly updating) the CPU's FP status. */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

/* Narrow a double-precision image to single precision via softfloat,
 * using (and possibly updating) the CPU's FP status. */
uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

/* Return non-zero when d's biased exponent is 0, i.e. d is denormal.
 * Note this is also true for +/-0; the caller (helper_compute_fprf)
 * tests for zero before calling this. */
static always_inline int isden (float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}
568

    
569
/* Classify a double-precision value into the 5-bit FPRF code defined by
 * the PowerPC architecture (QNaN, +/-inf, +/-zero, +/-denormal,
 * +/-normal).  When set_fprf is non-zero, FPSCR[FPRF] is updated with
 * the full code; the low 4 bits (FPCC) are always returned so the
 * caller can update CR1. */
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            /* Add the sign nibble (0x08 negative, 0x04 positive). */
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
620

    
621
/* Floating-point invalid operations exception */
/* Record an invalid operation of type <op> in the FPSCR and, when the
 * invalid-operation exception is enabled (FPSCR[VE]) and FP exceptions
 * are delivered (MSR[FE0|FE1]), raise a program exception.  Returns the
 * value the target FPR should receive: the default QNaN for disabled
 * arithmetic-class ops, otherwise 0. */
static always_inline uint64_t fload_invalid_op_excp (int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        /* FPCC is set to "unordered" (0x11 over the 4-bit field). */
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred: helper_float_check_status will
             * deliver it after the FPR has been written. */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
702

    
703
/* Record a zero-divide (ZX) in the FPSCR and raise a program exception
 * when ZX is enabled and FP exceptions are delivered. */
static always_inline void float_zero_divide_excp (void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

/* Record an overflow (OX).  When OX is enabled the exception is only
 * queued in exception_index/error_code; helper_float_check_status
 * delivers it after the target FPR is written.  When disabled, XX/FI
 * are set instead. */
static always_inline void float_overflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

/* Record an underflow (UX); delivery is deferred as for overflow. */
static always_inline void float_underflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

/* Record an inexact result (XX); delivery is deferred as above. */
static always_inline void float_inexact_excp (void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
765

    
766
static always_inline void fpscr_set_rounding_mode (void)
767
{
768
    int rnd_type;
769

    
770
    /* Set rounding mode */
771
    switch (fpscr_rn) {
772
    case 0:
773
        /* Best approximation (round to nearest) */
774
        rnd_type = float_round_nearest_even;
775
        break;
776
    case 1:
777
        /* Smaller magnitude (round toward zero) */
778
        rnd_type = float_round_to_zero;
779
        break;
780
    case 2:
781
        /* Round toward +infinite */
782
        rnd_type = float_round_up;
783
        break;
784
    default:
785
    case 3:
786
        /* Round toward -infinite */
787
        rnd_type = float_round_down;
788
        break;
789
    }
790
    set_float_rounding_mode(rnd_type, &env->fp_status);
791
}
792

    
793
/* Clear one FPSCR bit; if a rounding-control bit (RN/RN1) actually
 * changed from 1 to 0, refresh the softfloat rounding mode. */
void helper_fpscr_clrbit (uint32_t bit)
{
    int was_set = (env->fpscr >> bit) & 1;

    env->fpscr &= ~(1 << bit);
    if (was_set && (bit == FPSCR_RN1 || bit == FPSCR_RN)) {
        fpscr_set_rounding_mode();
    }
}
810

    
811
/* Set one FPSCR bit, mirroring the architected side effects: setting a
 * sticky exception bit also sets FX and, when the matching enable bit
 * is on, queues a program exception (delivered later by
 * helper_float_check_status); setting an enable bit with its exception
 * already pending does the same; RN/RN1 refresh the rounding mode. */
void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
        /* NOTE(review): no break here -- when VE is clear this falls
         * through into the OX case (setting FX again and possibly
         * raising OE).  Looks like a missing break; confirm. */
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            /* Any specific invalid-op bit also sets the VX summary. */
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                /* Collect every pending invalid-op cause into the
                 * error code before raising. */
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
925

    
926
/* mtfsf backend: replace the FPSCR nibbles selected by <mask> (bit i
 * guards nibble i, i = 0 being the least-significant nibble) with the
 * low 32 bits of <arg>, except FEX and VX (bits 0x60000000) which are
 * always recomputed from the sticky/enable bits below. */
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    /* FEX and VX are not directly writable: keep the previous values. */
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    }
    else
        env->fpscr &= ~(1 << FPSCR_FEX);
    fpscr_set_rounding_mode();
}
959

    
960
/* Called after an FP helper has written its target FPR: deliver any
 * deferred FP program exception, or (softfloat builds) translate the
 * accumulated softfloat flags into the corresponding FPSCR updates. */
void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        /* Only one exception class is reported per operation, in
         * decreasing priority: ZX, OX, UX, XX. */
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}
989

    
990
#ifdef CONFIG_SOFTFLOAT
991
void helper_reset_fpstatus (void)
992
{
993
    set_float_exception_flags(0, &env->fp_status);
994
}
995
#endif
996

    
997
/* fadd - fadd. */
/* Double-precision add.  With precise emulation, sNaN operands raise
 * VXSNAN and inf + (-inf) raises VXISI before softfloat is consulted;
 * fload_invalid_op_excp supplies the result in those cases. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                      float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
1021

    
1022
/* fsub - fsub. */
1023
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
1024
{
1025
    CPU_DoubleU farg1, farg2;
1026

    
1027
    farg1.ll = arg1;
1028
    farg2.ll = arg2;
1029
#if USE_PRECISE_EMULATION
1030
{
1031
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1032
                 float64_is_signaling_nan(farg2.d))) {
1033
        /* sNaN subtraction */
1034
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1035
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
1036
                      float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
1037
        /* Magnitude subtraction of infinities */
1038
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1039
    } else {
1040
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1041
    }
1042
}
1043
#else
1044
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1045
#endif
1046
    return farg1.ll;
1047
}
1048

    
1049
/* fmul - fmul. */
1050
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
1051
{
1052
    CPU_DoubleU farg1, farg2;
1053

    
1054
    farg1.ll = arg1;
1055
    farg2.ll = arg2;
1056
#if USE_PRECISE_EMULATION
1057
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1058
                 float64_is_signaling_nan(farg2.d))) {
1059
        /* sNaN multiplication */
1060
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1061
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1062
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1063
        /* Multiplication of zero by infinity */
1064
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1065
    } else {
1066
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1067
    }
1068
#else
1069
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1070
#endif
1071
    return farg1.ll;
1072
}
1073

    
1074
/* fdiv - fdiv. */
1075
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
1076
{
1077
    CPU_DoubleU farg1, farg2;
1078

    
1079
    farg1.ll = arg1;
1080
    farg2.ll = arg2;
1081
#if USE_PRECISE_EMULATION
1082
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
1083
                 float64_is_signaling_nan(farg2.d))) {
1084
        /* sNaN division */
1085
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1086
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
1087
        /* Division of infinity by infinity */
1088
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
1089
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
1090
        /* Division of zero by zero */
1091
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
1092
    } else {
1093
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1094
    }
1095
#else
1096
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1097
#endif
1098
    return farg1.ll;
1099
}
1100

    
1101
/* fabs */
1102
uint64_t helper_fabs (uint64_t arg)
1103
{
1104
    CPU_DoubleU farg;
1105

    
1106
    farg.ll = arg;
1107
    farg.d = float64_abs(farg.d);
1108
    return farg.ll;
1109
}
1110

    
1111
/* fnabs */
1112
uint64_t helper_fnabs (uint64_t arg)
1113
{
1114
    CPU_DoubleU farg;
1115

    
1116
    farg.ll = arg;
1117
    farg.d = float64_abs(farg.d);
1118
    farg.d = float64_chs(farg.d);
1119
    return farg.ll;
1120
}
1121

    
1122
/* fneg */
1123
uint64_t helper_fneg (uint64_t arg)
1124
{
1125
    CPU_DoubleU farg;
1126

    
1127
    farg.ll = arg;
1128
    farg.d = float64_chs(farg.d);
1129
    return farg.ll;
1130
}
1131

    
1132
/* fctiw - fctiw. */
/* Convert double to 32-bit signed integer using the current FPSCR
 * rounding mode.  sNaN raises VXSNAN|VXCVI; qNaN/inf raise VXCVI.
 * The high word mimics what a real 750 leaves there. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
/* Same as fctiw but always rounds toward zero (truncation). */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}
1179

    
1180
#if defined(TARGET_PPC64)
1181
/* fcfid - fcfid. */
1182
uint64_t helper_fcfid (uint64_t arg)
1183
{
1184
    CPU_DoubleU farg;
1185
    farg.d = int64_to_float64(arg, &env->fp_status);
1186
    return farg.ll;
1187
}
1188

    
1189
/* fctid - fctid. */
1190
uint64_t helper_fctid (uint64_t arg)
1191
{
1192
    CPU_DoubleU farg;
1193
    farg.ll = arg;
1194

    
1195
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1196
        /* sNaN conversion */
1197
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1198
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
1199
        /* qNan / infinity conversion */
1200
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1201
    } else {
1202
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
1203
    }
1204
    return farg.ll;
1205
}
1206

    
1207
/* fctidz - fctidz. */
1208
uint64_t helper_fctidz (uint64_t arg)
1209
{
1210
    CPU_DoubleU farg;
1211
    farg.ll = arg;
1212

    
1213
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1214
        /* sNaN conversion */
1215
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1216
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
1217
        /* qNan / infinity conversion */
1218
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1219
    } else {
1220
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
1221
    }
1222
    return farg.ll;
1223
}
1224

    
1225
#endif
1226

    
1227
/* Common implementation for the frin/friz/frip/frim round-to-integer
 * family: temporarily force the given rounding mode, round, then
 * restore the rounding mode configured in the FPSCR. */
static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}

/* frin: round to nearest, ties to even. */
uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

/* friz: round toward zero. */
uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

/* frip: round toward +infinity. */
uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

/* frim: round toward -infinity. */
uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
1266

    
1267
/* fmadd - fmadd. */
/* Fused multiply-add: (arg1 * arg2) + arg3 with a single rounding.
 * With FLOAT128 the intermediate product is kept in 128-bit precision
 * as the architecture specifies; the host-double fallback is only
 * correct on hosts whose FP matches IEEE double (e.g. x86). */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmsub - fmsub. */
/* Fused multiply-subtract: (arg1 * arg2) - arg3 with single rounding.
 * Same structure as fmadd; VXISI fires when the product and arg3 are
 * infinities of equal sign (magnitude subtraction). */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}
1360

    
1361
/* fnmadd - fnmadd. */
/* Negative fused multiply-add: -((arg1 * arg2) + arg3).
 * The final negation is skipped for NaN results, matching the
 * architecture (NaNs propagate without sign change). */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
        /* Only negate non-NaN results */
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}

/* fnmsub - fnmsub. */
/* Negative fused multiply-subtract: -((arg1 * arg2) - arg3).
 * Mirrors fnmadd with a subtraction and the opposite VXISI sign test. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
        /* Only negate non-NaN results */
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
1460

    
1461
/* frsp - frsp. */
/* Round a double to single precision and widen back, so the result
 * is a double exactly representable in single precision. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round to single precision */
       farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
       f32 = float64_to_float32(farg.d, &env->fp_status);
       farg.d = float32_to_float64(f32, &env->fp_status);
    }
#else
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);
#endif
    return farg.ll;
}
1482

    
1483
/* fsqrt - fsqrt. */
/* Double-precision square root.  VXSNAN for signaling NaNs,
 * VXSQRT for negative nonzero operands (sqrt(-0) is valid). */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1500

    
1501
/* fre - fre. */
1502
uint64_t helper_fre (uint64_t arg)
1503
{
1504
    CPU_DoubleU fone, farg;
1505
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
1506
    farg.ll = arg;
1507

    
1508
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1509
        /* sNaN reciprocal */
1510
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1511
    } else {
1512
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
1513
    }
1514
    return farg.d;
1515
}
1516

    
1517
/* fres - fres. */
/* Single-precision reciprocal estimate: compute 1.0/x in double,
 * then round through single precision. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU fone, farg;
    float32 f32;
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
1535

    
1536
/* frsqrte  - frsqrte. */
/* Reciprocal square root estimate: 1.0/sqrt(x) rounded through
 * single precision.  VXSQRT for negative nonzero operands. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU fone, farg;
    float32 f32;
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
1558

    
1559
/* fsel - fsel. */
1560
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1561
{
1562
    CPU_DoubleU farg1;
1563

    
1564
    farg1.ll = arg1;
1565

    
1566
    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
1567
        return arg2;
1568
    else
1569
        return arg3;
1570
}
1571

    
1572
/* fcmpu: unordered compare.  Sets CR field crfD and FPSCR[FPRF] to
 * 0x08 (lt), 0x04 (gt), 0x02 (eq) or 0x01 (unordered).  Only a
 * signaling NaN raises VXSNAN; qNaNs compare as unordered silently. */
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}

/* fcmpo: ordered compare.  Same result encoding as fcmpu, but any
 * NaN operand is an invalid operation: sNaN raises VXSNAN|VXVC,
 * qNaN raises VXVC. */
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
1634

    
1635
#if !defined (CONFIG_USER_ONLY)
1636
/* Store to the MSR.  hreg_store_msr() returns a non-zero exception
 * number when the write requires one (e.g. entering power-saving
 * mode); in that case force an exit from the current TB, since
 * translated code depends on MSR state, and raise the exception. */
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}
1644

    
1645
/* Common return-from-interrupt: load NIP and MSR from the given
 * save/restore values.  msrm masks which MSR bits may be restored;
 * keep_msrh preserves the current high 32 MSR bits when returning
 * to 32-bit mode on a 64-bit CPU. */
static always_inline void do_rfi (target_ulong nip, target_ulong msr,
                                    target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        /* Returning to 64-bit mode: use full-width NIP/MSR */
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        /* Returning to 32-bit mode: truncate, optionally keep MSR high half */
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}

/* rfi: return from interrupt using SRR0/SRR1, keeping MSR high bits. */
void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0xFFFF0000), 1);
}

#if defined(TARGET_PPC64)
/* rfid: 64-bit return from interrupt using SRR0/SRR1. */
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0xFFFF0000), 0);
}

/* hrfid: return from hypervisor interrupt using HSRR0/HSRR1. */
void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0xFFFF0000), 0);
}
1692
#endif
1693
#endif
1694

    
1695
/* tw: trap word.  flags selects which 32-bit comparisons of arg1
 * against arg2 (signed lt/gt/eq, unsigned lt/gt) trigger a TRAP
 * program exception. */
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1705

    
1706
#if defined(TARGET_PPC64)
1707
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
1708
{
1709
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1710
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1711
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1712
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1713
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
1714
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1715
}
1716
#endif
1717

    
1718
/*****************************************************************************/
1719
/* PowerPC 601 specific instructions (POWER bridge) */
1720

    
1721
/* clcs (601): cache line compute size.  Returns a cache geometry
 * value selected by arg: 0x0C = I-cache line size, 0x0D = D-cache
 * line size, 0x0E/0x0F = min/max of the two; anything else is
 * architecturally undefined and returns 0.
 * Cleanup: removed the unreachable `break` statements that followed
 * each `return`. */
target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
    default:
        /* Undefined */
        return 0;
    }
}
1748

    
1749
/* div (601/POWER bridge): divide the 64-bit value (arg1:MQ) by arg2.
 * Quotient is returned, remainder goes to SPR_MQ.  Overflow
 * (INT32_MIN / -1) and divide-by-zero yield INT32_MIN with MQ = 0.
 * NOTE(review): the modulo uses unsigned arg2 while the quotient
 * divides by (int32_t)arg2 — presumably matching 601 behavior;
 * verify against the POWER architecture before changing. */
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return  tmp / (int32_t)arg2;
    }
}

/* divo: as div, but also records overflow in XER[OV]/XER[SO]. */
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        /* Overflow if the quotient does not fit in 32 bits */
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}

/* divs: 32-bit signed divide; remainder goes to SPR_MQ. */
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

/* divso: as divs, with XER[OV]/XER[SO] overflow recording. */
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
1809

    
1810
#if !defined (CONFIG_USER_ONLY)
1811
/* rac (601): real address compute.  Translates addr through the MMU
 * with BATs temporarily disabled; returns 0 on translation failure. */
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    /* Restore the saved BAT count */
    env->nb_BATs = nb_BATs;
    return ret;
}
1828

    
1829
/* rfsvc (601): return from supervisor call — restores PC from LR
 * and (masked) MSR from CTR. */
void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
1833
#endif
1834

    
1835
/*****************************************************************************/
1836
/* 602 specific instructions */
1837
/* mfrom is the most crazy instruction ever seen, imho ! */
1838
/* Real implementation uses a ROM table. Do the same */
1839
/* Extremly decomposed:
1840
 *                      -arg / 256
1841
 * return 256 * log10(10           + 1.0) + 0.5
1842
 */
1843
#if !defined (CONFIG_USER_ONLY)
/* 602 mfrom: table lookup of 256*log10(10^(-arg/256) + 1.0) + 0.5,
 * matching the on-chip ROM; out-of-range inputs return 0. */
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif
1854

    
1855
/*****************************************************************************/
1856
/* Embedded PowerPC specific helpers */
1857

    
1858
/* XXX: to be improved to check access rights when in user-mode */
1859
target_ulong helper_load_dcr (target_ulong dcrn)
1860
{
1861
    target_ulong val = 0;
1862

    
1863
    if (unlikely(env->dcr_env == NULL)) {
1864
        qemu_log("No DCR environment\n");
1865
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1866
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1867
    } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
1868
        qemu_log("DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
1869
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1870
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1871
    }
1872
    return val;
1873
}
1874

    
1875
/* Write a Device Control Register.
 * Mirrors helper_load_dcr: raises a program interrupt when there is no
 * DCR environment or when the DCR write is rejected. */
void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (env->dcr_env == NULL) {
        /* CPU model has no DCR bus at all. */
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (ppc_dcr_write(env->dcr_env, dcrn, val) != 0) {
        /* Unmapped or faulting DCR number: log it both decimal and hex. */
        qemu_log("DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
1887

    
1888
#if !defined(CONFIG_USER_ONLY)
1889
void helper_40x_rfci (void)
1890
{
1891
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1892
           ~((target_ulong)0xFFFF0000), 0);
1893
}
1894

    
1895
void helper_rfci (void)
1896
{
1897
    do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1898
           ~((target_ulong)0x3FFF0000), 0);
1899
}
1900

    
1901
void helper_rfdi (void)
1902
{
1903
    do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1904
           ~((target_ulong)0x3FFF0000), 0);
1905
}
1906

    
1907
void helper_rfmci (void)
1908
{
1909
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1910
           ~((target_ulong)0x3FFF0000), 0);
1911
}
1912
#endif
1913

    
1914
/* 440 specific */
1915
/* 440 dlmzb: determine the length (in bytes, 1-based count of bytes
 * scanned) of the string held in the high:low register pair, stopping at
 * the first zero byte.  The count is written to XER[0:6]; when update_Rc
 * is set CR0 records where (or whether) a zero byte was found, OR'ed
 * with XER[SO]. */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    int count = 1;
    int shift;
    int found = 0;
    uint32_t cr = 0x2;          /* no zero byte in either word */

    /* Scan the most-significant word, byte 0 (bits 24..31 of 'high')
     * first. */
    for (shift = 24; shift >= 0; shift -= 8) {
        if (((high >> shift) & 0xFF) == 0) {
            cr = 0x4;           /* zero byte found in 'high' */
            found = 1;
            break;
        }
        count++;
    }
    if (!found) {
        /* Continue into the least-significant word. */
        for (shift = 24; shift >= 0; shift -= 8) {
            if (((low >> shift) & 0xFF) == 0) {
                cr = 0x8;       /* zero byte found in 'low' */
                found = 1;
                break;
            }
            count++;
        }
    }
    /* XER[0:6] receives the byte count (SO and other high bits are
     * preserved because only the low 7 bits are replaced). */
    env->xer = (env->xer & ~0x7F) | count;
    if (update_Rc) {
        env->crf[0] = cr | xer_so;
    }
    return count;
}
1949

    
1950
/*****************************************************************************/
1951
/* Altivec extension helpers */
1952
/* Host-endian-dependent indices for the two u64 halves of a ppc_avr_t:
 * HI_IDX selects the architecturally most-significant half, LO_IDX the
 * least-significant one, regardless of host byte order. */
#if defined(WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/* Iterate over the elements of vector r in architectural (big-endian)
 * order on any host: ascending on big-endian hosts, descending on
 * little-endian ones. */
#if defined(WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif
1967

    
1968
/* Saturating arithmetic helpers.  */
/* Generates cvt<from><to>(): clamp x to [min, max] for the destination
 * type, setting *sat when clamping occurred.  use_min/use_max allow
 * skipping a bound check (e.g. an unsigned source can never be < 0). */
#define SATCVT(from, to, from_type, to_type, min, max, use_min, use_max) \
    static always_inline to_type cvt##from##to (from_type x, int *sat)  \
    {                                                                   \
        to_type r;                                                      \
        if (use_min && x < min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (use_max && x > max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
/* signed -> signed */
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX, 1, 1)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX, 1, 1)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX, 1, 1)
/* unsigned -> unsigned (no lower-bound check needed) */
SATCVT(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX, 0, 1)
SATCVT(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX, 0, 1)
SATCVT(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX, 0, 1)
/* signed -> unsigned */
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX, 1, 1)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX, 1, 1)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX, 1, 1)
#undef SATCVT
1994

    
1995
/* Load-vector-element helpers (lvebx/lvehx/lvewx): load a single element
 * into the register slot selected by the low 4 bits of the effective
 * address, byte-swapping when the CPU runs little-endian (msr_le). */
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if(msr_le) {                                            \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
/* Identity "swap" for the byte-sized variant. */
#define I(x) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE
2014

    
2015
/* lvsl: build the permute control vector for left-aligned unaligned
 * loads — bytes sh..sh+15, where sh is the low nibble of the address. */
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int val = sh & 0xf;

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = val;
        val++;
    }
}
2023

    
2024
/* lvsr: build the permute control vector for right-aligned unaligned
 * loads — bytes (16-sh)..(31-sh), where sh is the low nibble of the
 * address. */
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int val = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = val;
        val++;
    }
}
2032

    
2033
#define STVE(name, access, swap, element)                       \
2034
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
2035
    {                                                           \
2036
        size_t n_elems = ARRAY_SIZE(r->element);                \
2037
        int adjust = HI_IDX*(n_elems-1);                        \
2038
        int sh = sizeof(r->element[0]) >> 1;                    \
2039
        int index = (addr & 0xf) >> sh;                         \
2040
        if(msr_le) {                                            \
2041
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2042
        } else {                                                        \
2043
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2044
        }                                                               \
2045
    }
2046
#define I(x) (x)
2047
STVE(stvebx, stb, I, u8)
2048
STVE(stvehx, stw, bswap16, u16)
2049
STVE(stvewx, stl, bswap32, u32)
2050
#undef I
2051
#undef LVE
2052

    
2053
/* vaddcuw: per-word carry-out of the unsigned add a + b
 * (1 when the 32-bit sum wraps, else 0). */
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        /* a + b overflows iff b exceeds the headroom ~a. */
        uint32_t headroom = ~a->u32[i];
        r->u32[i] = (b->u32[i] > headroom) ? 1 : 0;
    }
}
2060

    
2061
/* Modulo (non-saturating) element-wise vector add/subtract:
 * vaddubm/vadduhm/vadduwm and vsububm/vsubuhm/vsubuwm. */
#define VARITH_DO(name, op, element)        \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
{                                                                       \
    int i;                                                              \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
        r->element[i] = a->element[i] op b->element[i];                 \
    }                                                                   \
}
#define VARITH(suffix, element)                  \
  VARITH_DO(add##suffix, +, element)             \
  VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH
2077

    
2078
/* Saturating vector add/subtract: the operation is performed in a wider
 * type (optype), then clamped back with the matching cvt* helper; any
 * saturation sets VSCR[SAT]. */
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)    \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)       \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)     \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
2116

    
2117
/* Vector average with round-to-nearest: (a + b + 1) >> 1 computed in a
 * wider type (etype) so the intermediate sum cannot overflow. */
#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
2135

    
2136
/* Vector compare: each result element is all-ones when the comparison
 * holds, zero otherwise.  The "_dot" (record) variants also set CR6:
 * bit 3 when every element compared true, bit 1 when none did. */
#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
            switch (sizeof (a->element[0])) {                           \
            case 4: r->u32[i] = result; break;                          \
            case 2: r->u16[i] = result; break;                          \
            case 1: r->u8[i] = result; break;                           \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP
2171

    
2172
/* vmhaddshs: multiply-high and add, with signed saturation — for each
 * halfword, (a*b) >> 15 is added to c and the sum saturated to int16;
 * saturation sets VSCR[SAT]. */
void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t product = a->s16[i] * b->s16[i];
        int32_t sum = (product >> 15) + (int32_t)c->s16[i];
        r->s16[i] = cvtswsh(sum, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2187

    
2188
/* vmhraddshs: like vmhaddshs but rounding — 0x4000 is added to the
 * product before the >> 15 so the high half is round-to-nearest. */
void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t product = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t sum = (product >> 15) + (int32_t)c->s16[i];
        r->s16[i] = cvtswsh(sum, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2203

    
2204
/* Element-wise vector min/max: the compare operator is chosen so that
 * min uses > (keep b when a > b) and max uses < (keep b when a < b). */
#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
  VMINMAX_DO(min##suffix, >, element)           \
  VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX
2227

    
2228
/* vmladduhm: halfword multiply-low then add, modulo 2^16 —
 * r = (a * b + c) truncated to 16 bits per element. */
void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t product = a->s16[i] * b->s16[i];
        int32_t sum = product + c->s16[i];
        r->s16[i] = (int16_t)sum;
    }
}
2236

    
2237
/* Vector merge high/low: interleave elements from the high or low half
 * of a and b.  A temporary is built first because r may alias a or b. */
#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        for (i = 0; i < n_elems/2; i++) {                               \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
/* NOTE(review): mrgl* is instantiated with MRGHI and mrgh* with MRGLO —
 * this looks cross-wired but presumably compensates for the host-order
 * element numbering above; confirm against the AltiVec PEM before
 * changing. */
#define VMRG(suffix, element)                   \
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
  VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO
2271

    
2272
/* vmsummbm: mixed-sign multiply-sum — each signed byte of a is
 * multiplied by the corresponding unsigned byte of b, and groups of
 * four byte products are summed into the matching word of c. */
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t partial[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        partial[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        int32_t acc = c->s32[i];
        acc += partial[4*i];
        acc += partial[4*i+1];
        acc += partial[4*i+2];
        acc += partial[4*i+3];
        r->s32[i] = acc;
    }
}
2285

    
2286
/* vmsumshm: signed halfword multiply-sum, modulo — pairs of halfword
 * products are summed into the matching word of c without saturation. */
void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t partial[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        partial[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + partial[2*i] + partial[2*i+1];
    }
}
2299

    
2300
/* vmsumshs: signed halfword multiply-sum with saturation — the per-word
 * accumulation is done in 64 bits and clamped to int32; saturation sets
 * VSCR[SAT]. */
void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t partial[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        partial[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        int64_t sum = (int64_t)c->s32[i] + partial[2*i] + partial[2*i+1];
        r->u32[i] = cvtsdsw(sum, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2319

    
2320
/* vmsumubm: unsigned byte multiply-sum, modulo — groups of four byte
 * products are summed into the matching word of c. */
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t partial[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        partial[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        uint32_t acc = c->u32[i];
        acc += partial[4*i];
        acc += partial[4*i+1];
        acc += partial[4*i+2];
        acc += partial[4*i+3];
        r->u32[i] = acc;
    }
}
2333

    
2334
/* vmsumuhm: unsigned halfword multiply-sum, modulo — pairs of halfword
 * products are summed into the matching word of c. */
void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t partial[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        partial[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + partial[2*i] + partial[2*i+1];
    }
}
}
2347

    
2348
/* vmsumuhs: unsigned halfword multiply-sum with saturation — the
 * per-word accumulation is done in 64 bits and clamped to uint32;
 * saturation sets VSCR[SAT]. */
void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t partial[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        partial[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        uint64_t sum = (uint64_t)c->u32[i] + partial[2*i] + partial[2*i+1];
        r->u32[i] = cvtuduw(sum, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2367

    
2368
/* Vector multiply even/odd: multiply the even- or odd-numbered
 * (architectural order) elements of a and b, producing double-width
 * results.  HI_IDX/LO_IDX keep the even/odd selection host-independent. */
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element) \
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
2389

    
2390
/* vperm: byte permute — each control byte in c selects one byte out of
 * the 32-byte concatenation of a and b (bit 4 picks b, low 4 bits the
 * byte index).  A temporary is used because r may alias a, b or c. */
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t tmp;
    int i;

    VECTOR_FOR_INORDER_I (i, u8) {
        int sel = c->u8[i] & 0x1f;
#if defined(WORDS_BIGENDIAN)
        int idx = sel & 0xf;
#else
        /* Host element order is reversed: mirror the byte index. */
        int idx = 15 - (sel & 0xf);
#endif
        tmp.u8[i] = (sel & 0x10) ? b->u8[idx] : a->u8[idx];
    }
    *r = tmp;
}
2409

    
2410
/* PKBIG: 1 when host element order matches architectural (big-endian)
 * order; used by the VPK pack helpers below to pick operand order. */
#if defined(WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
/* vpkpx: pack eight 32-bit pixels (from a then b) into eight 16-bit
 * 1:5:5:5 pixels, taking the top bits of each 8-bit channel. */
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
    /* Operand order follows architectural order regardless of host. */
#if defined(WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I (i, u64) {
        VECTOR_FOR_INORDER_I (j, u32){
            uint32_t e = x[i]->u32[j];
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}
2435

    
2436
/* Vector pack: narrow the elements of a then b (architectural order,
 * via PKBIG) into one result vector, converting each element with cvt.
 * Saturating variants (dosat) set VSCR[SAT] when any element clamps;
 * the modulo variants use the identity "conversion" I below. */
#define VPK(suffix, from, to, cvt, dosat)       \
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
        VECTOR_FOR_INORDER_I (i, from) {                                \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
/* Identity conversion for the modulo (truncating) pack variants;
 * the second argument absorbs the unused &sat. */
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG
2465

    
2466
/* Vector rotate left: each element of a is rotated by the low
 * log2(element bits) bits of the matching element of b.  The mask
 * computes 0x7/0xf/0x1f from the element size. */
#define VROTATE(suffix, element)                                        \
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
        }                                                               \
    }
VROTATE(b, u8)
VROTATE(h, u16)
VROTATE(w, u32)
#undef VROTATE
2480

    
2481
/* vsel: bitwise select — result bits come from b where the mask c is 1
 * and from a where it is 0. */
void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < 2; i++) {
        uint64_t mask = c->u64[i];
        r->u64[i] = (a->u64[i] & ~mask) | (b->u64[i] & mask);
    }
}
2486

    
2487
#if defined(WORDS_BIGENDIAN)
2488
#define LEFT 0
2489
#define RIGHT 1
2490
#else
2491
#define LEFT 1
2492
#define RIGHT 0
2493
#endif
2494
/* The specification says that the results are undefined if all of the
2495
 * shift counts are not identical.  We check to make sure that they are
2496
 * to conform to what real hardware appears to do.  */
2497
#define VSHIFT(suffix, leftp)                                           \
2498
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
2499
    {                                                                   \
2500
        int shift = b->u8[LO_IDX*0x15] & 0x7;                           \
2501
        int doit = 1;                                                   \
2502
        int i;                                                          \
2503
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
2504
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
2505
        }                                                               \
2506
        if (doit) {                                                     \
2507
            if (shift == 0) {                                           \
2508
                *r = *a;                                                \
2509
            } else if (leftp) {                                         \
2510
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
2511
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
2512
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
2513
            } else {                                                    \
2514
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
2515
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
2516
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
2517
            }                                                           \
2518
        }                                                               \
2519
    }
2520
VSHIFT(l, LEFT)
2521
VSHIFT(r, RIGHT)
2522
#undef VSHIFT
2523
#undef LEFT
2524
#undef RIGHT
2525

    
2526
/* Element-wise vector shift left: each element of a is shifted by the
 * low log2(element bits) bits of the matching element of b. */
#define VSL(suffix, element)                                            \
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL
2540

    
2541
/* vsldoi: shift the 32-byte concatenation a:b left by sh (0..15) bytes
 * and take the high 16 bytes.  The little-endian branch mirrors the
 * index arithmetic because host element order is reversed.  A temporary
 * is used because r may alias a or b. */
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

#if defined(WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index-0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index-0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    *r = result;
}
2568

    
2569
/* vslo: shift the whole vector left by octets.  The byte count comes
 * from bits 121:124 of b, i.e. bits 3:6 of the architecturally last
 * byte (u8[LO_IDX*0xf]).  memmove is used, presumably because r may
 * alias a (same target register) — NOTE(review): confirm aliasing of
 * helper arguments before replacing with memcpy. */
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (WORDS_BIGENDIAN)
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#else
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#endif
}
2581

    
2582
/* Experimental testing shows that hardware masks the immediate.  */
/* _SPLAT_MASKED clamps the splat immediate to a valid element index;
 * SPLAT_ELEMENT converts the architectural index to host element order. */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
#endif
/* vspltb/vsplth/vspltw: replicate the element selected by the
 * immediate into every element of the result. */
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED
2604

    
2605
/* vspltisb/vspltish/vspltisw: splat a sign-extended 5-bit immediate.
 * The (splat << 3) >> 3 dance sign-extends bit 4 of the immediate
 * through an int8_t. */
#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI
2618

    
2619
/* vsrab..vsrw: per-lane right shift; arithmetic for the signed (s*)
   instantiations, logical for the unsigned ones.  The mask expression
   evaluates to element-width-in-bits minus one (7, 15 or 31), so the
   shift count from b is always in range.  */
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR
2636

    
2637
/* vsro: shift the whole 128-bit value of a right by a whole number of
   bytes (0..15); the count is taken from one byte of b (value >> 3).
   Vacated bytes are zeroed.  */
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (WORDS_BIGENDIAN)
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#else
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#endif
}
2649

    
2650
/* vsubcuw: per 32-bit lane, record the carry/borrow-free flag of the
 * unsigned subtraction a - b: 1 when no borrow would occur, else 0. */
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int lane;

    for (lane = 0; lane < ARRAY_SIZE(r->u32); lane++) {
        /* No borrow exactly when the minuend is not below the subtrahend. */
        r->u32[lane] = (b->u32[lane] > a->u32[lane]) ? 0 : 1;
    }
}
2657

    
2658
/* vsumsws: sum all four signed words of a plus the last (architected)
   word of b in 64-bit precision, saturate to 32 bits into the last
   word of r, and clear the other words.  Sets VSCR[SAT] on
   saturation.  A temporary is used so r may alias a or b.  */
void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2682

    
2683
/* vsum2sws: for each 64-bit half, sum its two signed words of a plus
   one word of b in 64-bit precision, saturate to 32 bits into that
   half's architected word (index upper+i*2) and zero the other word.
   Sets VSCR[SAT] on saturation; a temporary allows r to alias a/b.  */
void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper+i*2];
        result.u64[i] = 0;
        /* ARRAY_SIZE(r->u64) == 2 == number of s32 lanes per u64 half. */
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2*i+j];
        }
        result.s32[upper+i*2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2708

    
2709
/* vsum4sbs: for each word lane, add the four signed bytes of a to the
   signed word of b, saturating the 64-bit total to 32 bits; sets
   VSCR[SAT] on saturation.  */
void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        /* ARRAY_SIZE(r->s32) == 4 == number of s8 lanes per s32 lane. */
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4*i+j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2726

    
2727
/* vsum4shs: for each word lane, add the two signed halfwords of a to
   the signed word of b, saturating to 32 bits; sets VSCR[SAT] on
   saturation.  */
void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        t += a->s16[2*i] + a->s16[2*i+1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2742

    
2743
/* vsum4ubs: for each word lane, add the four unsigned bytes of a to
   the unsigned word of b, saturating to 32 bits; sets VSCR[SAT] on
   saturation.  */
void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];
        /* ARRAY_SIZE(r->u32) == 4 == number of u8 lanes per u32 lane. */
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4*i+j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2760

    
2761
#if defined(WORDS_BIGENDIAN)
2762
#define UPKHI 1
2763
#define UPKLO 0
2764
#else
2765
#define UPKHI 0
2766
#define UPKLO 1
2767
#endif
2768
#define VUPKPX(suffix, hi)                                      \
2769
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
2770
    {                                                           \
2771
        int i;                                                  \
2772
        ppc_avr_t result;                                       \
2773
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
2774
            uint16_t e = b->u16[hi ? i : i+4];                  \
2775
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
2776
            uint8_t r = (e >> 10) & 0x1f;                       \
2777
            uint8_t g = (e >> 5) & 0x1f;                        \
2778
            uint8_t b = e & 0x1f;                               \
2779
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
2780
        }                                                               \
2781
        *r = result;                                                    \
2782
    }
2783
VUPKPX(lpx, UPKLO)
2784
VUPKPX(hpx, UPKHI)
2785
#undef VUPKPX
2786

    
2787
/* vupkhsb/vupkhsh/vupklsb/vupklsh: sign-extend the high (hi=1) or low
   (hi=0) half of the narrow (packee) lanes of b into the wide
   (unpacked) lanes.  A temporary is used so r may alias b.  */
#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO
2810

    
2811
#undef VECTOR_FOR_INORDER_I
2812
#undef HI_IDX
2813
#undef LO_IDX
2814

    
2815
/*****************************************************************************/
2816
/* SPE extension helpers */
2817
/* Use a table to make this quicker */
2818
/* Nibble bit-reversal lookup table: hbrev[n] is the 4-bit reversal of
   n (e.g. 0b0001 -> 0b1000).  Declared const: it is a read-only
   table, so it can live in read-only storage. */
static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};
2822

    
2823
/* Reverse the bit order of a byte by reversing each nibble via the
   hbrev table and swapping the two nibbles. */
static always_inline uint8_t byte_reverse (uint8_t val)
{
    uint8_t lo_nibble = val & 0xF;
    uint8_t hi_nibble = val >> 4;

    return (hbrev[lo_nibble] << 4) | hbrev[hi_nibble];
}
2827

    
2828
/* Reverse the bit order of a 32-bit word: reverse every byte and
   mirror the byte order within the word. */
static always_inline uint32_t word_reverse (uint32_t val)
{
    uint32_t ret = 0;
    int i;

    for (i = 0; i < 4; i++) {
        /* Byte taken from bit offset 24-8i lands at bit offset 8i. */
        ret |= (uint32_t)byte_reverse(val >> (24 - 8 * i)) << (8 * i);
    }
    return ret;
}
2833

    
2834
#define MASKBITS 16 // Random value - to be fixed (implementation dependant)
/* brinc: bit-reversed increment, used for FFT-style addressing.
   Within the low MASKBITS bits, the bit-reversed image of arg1 is
   incremented, with the carry confined to the bits set in arg2, then
   reversed back; bits of arg1 outside the mask pass through
   unchanged.  */
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}
2845

    
2846
/* Count leading sign bits of a 32-bit word: leading ones for negative
   values, leading zeros otherwise. */
uint32_t helper_cntlsw32 (uint32_t val)
{
    uint32_t probe = (val & 0x80000000) ? ~val : val;

    return clz32(probe);
}
2853

    
2854
/* Count leading zeros of a 32-bit word (delegates to clz32). */
uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}
2858

    
2859
/* Single-precision floating-point conversions */
/* efscfsi: convert signed 32-bit integer to single precision. */
static always_inline uint32_t efscfsi (uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efscfui: convert unsigned 32-bit integer to single precision. */
static always_inline uint32_t efscfui (uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efsctsi: convert single precision to signed 32-bit integer using
   the current rounding mode; NaN inputs yield 0. */
static always_inline int32_t efsctsi (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_int32(u.f, &env->vec_status);
}

/* efsctui: convert single precision to unsigned 32-bit integer using
   the current rounding mode; NaN inputs yield 0. */
static always_inline uint32_t efsctui (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_uint32(u.f, &env->vec_status);
}

/* efsctsiz: as efsctsi but always rounds toward zero. */
static always_inline uint32_t efsctsiz (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

/* efsctuiz: as efsctui but always rounds toward zero. */
static always_inline uint32_t efsctuiz (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}
2925

    
2926
/* efscfsf: convert signed 32-bit fractional (value scaled by 2^-32)
   to single precision — convert the integer then divide by 2^32. */
static always_inline uint32_t efscfsf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efscfuf: convert unsigned 32-bit fractional to single precision. */
static always_inline uint32_t efscfuf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efsctsf: convert single precision to signed fractional — scale by
   2^32 then convert to integer; NaN inputs yield 0. */
static always_inline uint32_t efsctsf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

/* efsctuf: convert single precision to unsigned fractional; NaN
   inputs yield 0. */
static always_inline uint32_t efsctuf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
2979

    
2980
/* Scalar SPE single-precision conversion helpers: thin exported
   wrappers around the static inline implementations above. */
#define HELPER_SPE_SINGLE_CONV(name)                                          \
uint32_t helper_e##name (uint32_t val)                                        \
{                                                                             \
    return e##name(val);                                                      \
}
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
3005

    
3006
/* Vector SPE conversion helpers: apply the scalar conversion
   independently to both 32-bit halves of the 64-bit operand. */
#define HELPER_SPE_VECTOR_CONV(name)                                          \
uint64_t helper_ev##name (uint64_t val)                                       \
{                                                                             \
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
            (uint64_t)e##name(val);                                           \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
3032

    
3033
/* Single-precision floating-point arithmetic */
/* efsadd: single-precision add using the SPE/vector float status. */
static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efssub: single-precision subtract (op1 - op2). */
static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efsmul: single-precision multiply. */
static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efsdiv: single-precision divide (op1 / op2). */
static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}
3069

    
3070
/* Scalar SPE single-precision arithmetic helpers: exported wrappers
   around the static inline implementations above. */
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);
3083

    
3084
/* Vector SPE arithmetic helpers: apply the scalar operation
   independently to both 32-bit halves of the 64-bit operands. */
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
            (uint64_t)e##name(op1, op2);                                      \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
3098

    
3099
/* Single-precision floating-point comparisons */
3100
static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
3101
{
3102
    CPU_FloatU u1, u2;
3103
    u1.l = op1;
3104
    u2.l = op2;
3105
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3106
}
3107

    
3108
static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
3109
{
3110
    CPU_FloatU u1, u2;
3111
    u1.l = op1;
3112
    u2.l = op2;
3113
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
3114
}
3115

    
3116
static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
3117
{
3118
    CPU_FloatU u1, u2;
3119
    u1.l = op1;
3120
    u2.l = op2;
3121
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3122
}
3123

    
3124
static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
3125
{
3126
    /* XXX: TODO: test special values (NaN, infinites, ...) */
3127
    return efststlt(op1, op2);
3128
}
3129

    
3130
static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
3131
{
3132
    /* XXX: TODO: test special values (NaN, infinites, ...) */
3133
    return efststgt(op1, op2);
3134
}
3135

    
3136
static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
3137
{
3138
    /* XXX: TODO: test special values (NaN, infinites, ...) */
3139
    return efststeq(op1, op2);
3140
}
3141

    
3142
/* Scalar SPE comparison helpers: shift the predicate result left by
   two bit positions to place it in the crfD field. */
#define HELPER_SINGLE_SPE_CMP(name)                                           \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2) << 2;                                            \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
3159

    
3160
/* Merge two per-element comparison results into one crfD nibble:
   bit3 = high element, bit2 = low element, bit1 = either, bit0 = both.
   The bit arithmetic assumes t0 and t1 are 0 or 1. */
static always_inline uint32_t evcmp_merge (int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
3164

    
3165
/* Vector SPE comparison helpers: compare both 32-bit halves and merge
   the two element results into a single crfD value. */
#define HELPER_VECTOR_SPE_CMP(name)                                           \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
3182

    
3183
/* Double-precision floating-point conversion */
/* efdcfsi: convert signed 32-bit integer to double precision. */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

/* efdcfsid: convert signed 64-bit integer to double precision. */
uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

/* efdcfui: convert unsigned 32-bit integer to double precision. */
uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

/* efdcfuid: convert unsigned 64-bit integer to double precision. */
uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}
3219

    
3220
/* efdctsi: convert double to signed 32-bit integer using the current
   rounding mode; NaN inputs yield 0. */
uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int32(u.d, &env->vec_status);
}

/* efdctui: convert double to unsigned 32-bit integer; NaN -> 0. */
uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32(u.d, &env->vec_status);
}

/* efdctsiz: convert double to signed 32-bit integer, rounding toward
   zero; NaN -> 0. */
uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

/* efdctsidz: convert double to signed 64-bit integer, rounding toward
   zero; NaN -> 0. */
uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

/* efdctuiz: convert double to unsigned 32-bit integer, rounding
   toward zero; NaN -> 0. */
uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

/* efdctuidz: convert double to unsigned 64-bit integer, rounding
   toward zero; NaN -> 0. */
uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}
3291

    
3292
/* efdcfsf: convert signed 32-bit fractional (value scaled by 2^-32)
   to double precision — convert the integer then divide by 2^32. */
uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

/* efdcfuf: convert unsigned 32-bit fractional to double precision.
   (1ULL << 32 fits in int64, so int64_to_float64 yields the same
   scale constant as the unsigned variant would.) */
uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

/* efdctsf: convert double precision to signed fractional — scale by
   2^32 then convert to integer; NaN inputs yield 0. */
uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

/* efdctuf: convert double precision to unsigned fractional; NaN
   inputs yield 0. */
uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}
3345

    
3346
/* efscfd: round a double-precision value to single precision. */
uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

/* efdcfs: widen a single-precision value to double precision. */
uint64_t helper_efdcfs (uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}
3367

    
3368
/* Double precision floating-point arithmetic */
/* efdadd: double-precision add using the SPE/vector float status. */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* efdsub: double-precision subtract (op1 - op2). */
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* efdmul: double-precision multiply. */
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* efddiv: double-precision divide (op1 / op2). */
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}
3404

    
3405
/* Double precision floating point helpers */
/* efdtstlt: return 4 when op1 < op2, else 0. */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

/* efdtstgt: return 4 when op1 > op2, else 0 — computed as the
   negation of "less or equal". */
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

/* efdtsteq: return 4 when op1 == op2, else 0. */
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

/* efdcmplt: currently identical to the test variant. */
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(op1, op2);
}

/* efdcmpgt: currently identical to the test variant. */
uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(op1, op2);
}

/* efdcmpeq: currently identical to the test variant. */
uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(op1, op2);
}
3447

    
3448
/*****************************************************************************/
3449
/* Softmmu support */
3450
#if !defined (CONFIG_USER_ONLY)
3451

    
3452
#define MMUSUFFIX _mmu
3453

    
3454
#define SHIFT 0
3455
#include "softmmu_template.h"
3456

    
3457
#define SHIFT 1
3458
#include "softmmu_template.h"
3459

    
3460
#define SHIFT 2
3461
#include "softmmu_template.h"
3462

    
3463
#define SHIFT 3
3464
#include "softmmu_template.h"
3465

    
3466
/* try to fill the TLB and return an exception if error. If retaddr is
3467
   NULL, it means that the function was called in C code (i.e. not
3468
   from generated code or from helper.c) */
3469
/* XXX: fix it to restore all registers */
3470
/* Attempt to fill the QEMU TLB for (addr, mmu_idx, is_write).  On
   failure, restore the guest CPU state from retaddr when we were
   called from generated code, then raise the MMU exception recorded
   in env by cpu_ppc_handle_mmu_fault (does not return in that case). */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
3497

    
3498
/* Segment registers load and store */
3499
/* mfsr: read segment register sr_num. */
target_ulong helper_load_sr (target_ulong sr_num)
{
    return env->sr[sr_num];
}

/* mtsr: write segment register sr_num via ppc_store_sr. */
void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}
3508

    
3509
/* SLB management */
3510
#if defined(TARGET_PPC64)
3511
/* Read SLB entry slb_nr. */
target_ulong helper_load_slb (target_ulong slb_nr)
{
    return ppc_load_slb(env, slb_nr);
}

/* Write SLB entry slb_nr with the register value rs. */
void helper_store_slb (target_ulong slb_nr, target_ulong rs)
{
    ppc_store_slb(env, slb_nr, rs);
}

/* slbia: invalidate all SLB entries. */
void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}

/* slbie: invalidate the SLB entry translating addr. */
void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
3530

    
3531
#endif /* defined(TARGET_PPC64) */
3532

    
3533
/* TLB management */
3534
/* tlbia: invalidate the whole guest TLB. */
void helper_tlbia (void)
{
    ppc_tlb_invalidate_all(env);
}

/* tlbie: invalidate any TLB entry translating addr. */
void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
3543

    
3544
/* Software driven TLBs management */
3545
/* PowerPC 602/603 software TLB load instructions helpers */
3546
/* Load one entry of the 602/603 software-managed TLB from the SPRs
   set up by the TLB-miss exception: RPA holds the PTE1 word, and
   ICMP/IMISS (code) or DCMP/DMISS (data) hold the compare word and
   faulting address.  The victim way is taken from SRR1 bit 17. */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
                " PTE1 " ADDRX " way %d\n",
                __func__, new_EPN, EPN, CMP, RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3567

    
3568
/* tlbld: load a data TLB entry (602/603 software TLB miss path). */
void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}

/* tlbli: load an instruction TLB entry. */
void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}
3577

    
3578
/* PowerPC 74xx software TLB load instructions helpers */
3579
/* Load one entry of the 74xx software-managed TLB: PTELO/PTEHI hold
   the PTE words and TLBMISS holds the faulting address with the
   victim way encoded in its low 2 bits.  The TLB itself is shared
   with the 6xx-style software TLB code. */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
                " PTE1 " ADDRX " way %d\n",
                __func__, new_EPN, EPN, CMP, RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3595

    
3596
/* tlbld (74xx): load a data TLB entry. */
void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}

/* tlbli (74xx): load an instruction TLB entry. */
void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}
3605

    
3606
/* Map a BookE TSIZE field to the page size in bytes: 1KB * 4^size.
   The constant is widened to target_ulong before shifting: for
   size >= 11 (the 64-bit sizes accepted by booke_page_size_to_tlb)
   the result exceeds 32 bits, and `1024 << n` on a plain int would
   be undefined behavior. */
static always_inline target_ulong booke_tlb_to_page_size (int size)
{
    return (target_ulong)1024 << (2 * size);
}
3610

    
3611
/* Inverse of booke_tlb_to_page_size: map a page size of the form
   1KB * 4^n back to the BookE TSIZE encoding (0x0..0xA, extended to
   0xF on 64-bit targets); returns -1 when the size is not
   representable. */
static always_inline int booke_page_size_to_tlb (target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}
3673

    
3674
/* Helpers for 4xx TLB management */
3675
/* 4xx tlbre, TLBHI word: read back a TLB entry's EPN, SIZE field and
 * valid bit.  As a side effect the entry's PID is loaded into SPR_40x_PID.
 */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb = &env->tlb[entry & 0x3F].tlbe;
    int size = booke_page_size_to_tlb(tlb->size);
    target_ulong val;

    if (size < 0 || size > 0x7) {
        /* Sizes not representable in the 3-bit field read back as 4 KiB */
        size = 1;
    }
    val = tlb->EPN | (size << 7);
    if (tlb->prot & PAGE_VALID) {
        val |= 0x400;
    }
    /* Reading TLBHI also updates the PID register */
    env->spr[SPR_40x_PID] = tlb->PID;
    return val;
}
3693

    
3694
/* 4xx tlbre, TLBLO word: read back a TLB entry's RPN together with its
 * execute (0x200) and write (0x100) permission bits.
 */
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    const ppcemb_tlb_t *tlb = &env->tlb[entry & 0x3F].tlbe;
    target_ulong val = tlb->RPN;

    if (tlb->prot & PAGE_EXEC) {
        val |= 0x200;
    }
    if (tlb->prot & PAGE_WRITE) {
        val |= 0x100;
    }
    return val;
}
3708

    
3709
/* 4xx tlbwe, TLBHI word: update a TLB entry's EPN, size and valid bit.
 * The QEMU TLB is flushed page-by-page for both the previous mapping (if
 * it was valid) and the new one, since either may shadow stale
 * translations.
 */
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " ADDRX
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
    /* SIZE is the 3-bit field at bit 7: page size = 1 KiB * 4 ^ SIZE */
    tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    /* The EPN is naturally aligned on the entry's page size */
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & 0x40)     /* valid bit */
        tlb->prot |= PAGE_VALID;
    else
        tlb->prot &= ~PAGE_VALID;
    if (val & 0x20) {   /* endianness bit -- little-endian pages unhandled */
        /* XXX: TO BE FIXED */
        cpu_abort(env, "Little-endian TLB entries are not supported by now\n");
    }
    /* The translation ID is taken from the current PID register */
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    tlb->attr = val & 0xFF;
    LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " ADDRX
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
}
3762

    
3763
/* 4xx tlbwe, TLBLO word: update a TLB entry's RPN and access rights.
 * Read permission is always granted; execute and write come from the
 * 0x200 and 0x100 bits of the written value.
 */
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    int prot;

    LOG_SWTLB("%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    tlb->RPN = val & 0xFFFFFC00;
    /* Rebuild the protection bits from scratch (this also clears
     * PAGE_VALID, exactly as the architecture-level TLBLO write does
     * not carry a valid bit).
     */
    prot = PAGE_READ;
    if (val & 0x200) {
        prot |= PAGE_EXEC;
    }
    if (val & 0x100) {
        prot |= PAGE_WRITE;
    }
    tlb->prot = prot;
    LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
3784

    
3785
/* 4xx tlbsx: search the TLB for a translation of 'address' under the
 * current PID.
 */
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    target_ulong pid = env->spr[SPR_40x_PID];

    return ppcemb_tlb_search(env, address, pid);
}
3789

    
3790
/* PowerPC 440 TLB management */
3791
/* 440 tlbwe: write one word of a TLB entry (word 0: EPN/size/valid,
 * word 1: RPN, word 2: attributes and permissions), flushing the QEMU
 * TLB whenever an established (valid) translation may have changed.
 */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " ADDRX "\n",
                __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        /* Word 0: EPN (top 22 bits), SIZE (4 bits at bit 4), one
         * attribute bit at bit 8, valid bit at bit 9.
         */
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        /* Growing a valid entry can newly cover pages: flush */
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        /* Bit 0 of attr is owned by word 0; the rest by word 2 */
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        /* The translation ID comes from MMUCR[TID], not the operands */
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        /* Word 1: real page number */
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        /* Word 2: storage attribute bits 8-15 plus two sets of R/W/X
         * permission bits; one privilege level's rights are kept shifted
         * left by 4 in tlb->prot (presumably user vs supervisor -- confirm
         * against the MMU translation code).  The valid bit is preserved.
         */
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}
3852

    
3853
/* 440 tlbre: read back one word of a TLB entry, mirroring the encodings
 * written by helper_440_tlbwe.  Reading word 0 has the side effect of
 * loading the entry's PID into the MMUCR TID field.
 */
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        /* Word 0: EPN | SIZE (4 bits at bit 4) | attr bit 0 | valid bit */
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;   /* unrepresentable sizes read back as 4 KiB */
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        /* Side effect: expose the entry's PID through MMUCR[TID] */
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        /* Word 1: real page number */
        ret = tlb->RPN;
        break;
    case 2:
        /* Word 2: storage attributes plus the two R/W/X permission sets
         * (bits 0-2 from the prot bits shifted by 4, bits 3-5 from the
         * unshifted prot bits) -- inverse of helper_440_tlbwe word 2.
         */
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}
3898

    
3899
/* 440 tlbsx: search the TLB for a translation of 'address' under the
 * translation ID currently held in MMUCR[TID].
 */
target_ulong helper_440_tlbsx (target_ulong address)
{
    uint32_t tid = env->spr[SPR_440_MMUCR] & 0xFF;

    return ppcemb_tlb_search(env, address, tid);
}
3903

    
3904
#endif /* !CONFIG_USER_ONLY */