Statistics
| Branch: | Revision:

root / target-ppc / op_helper.c @ 5ab09f33

History | View | Annotate | Download (114.1 kB)

1
/*
2
 *  PowerPC emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2003-2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19
 */
20
#include <string.h>
21
#include "exec.h"
22
#include "host-utils.h"
23
#include "helper.h"
24

    
25
#include "helper_regs.h"
26

    
27
//#define DEBUG_OP
28
//#define DEBUG_EXCEPTIONS
29
//#define DEBUG_SOFTWARE_TLB
30

    
31
/*****************************************************************************/
32
/* Exceptions processing helpers */
33

    
34
/* Raise a PowerPC exception: record the exception number and error code
 * in the CPU state, then longjmp back to the CPU loop. Never returns. */
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

/* Convenience wrapper: raise an exception with a zero error code. */
void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
48

    
49
/*****************************************************************************/
50
/* Registers load and stores */
51
target_ulong helper_load_cr (void)
52
{
53
    return (env->crf[0] << 28) |
54
           (env->crf[1] << 24) |
55
           (env->crf[2] << 20) |
56
           (env->crf[3] << 16) |
57
           (env->crf[4] << 12) |
58
           (env->crf[5] << 8) |
59
           (env->crf[6] << 4) |
60
           (env->crf[7] << 0);
61
}
62

    
63
/* Scatter the 32-bit CR image 'val' into the per-field crf[] array.
 * Only the fields whose bit is set in 'mask' are written; mask bit 7
 * selects CR field 0 (the most significant nibble of val). */
void helper_store_cr (target_ulong val, uint32_t mask)
{
    int field;

    for (field = 0; field < 8; field++) {
        int sh = 7 - field;

        if (mask & (1 << sh)) {
            env->crf[field] = (val >> (sh * 4)) & 0xFUL;
        }
    }
}
72

    
73
/*****************************************************************************/
74
/* SPR accesses */
75
/* Debug aid: log an SPR read (number in decimal and hex, plus value). */
void helper_load_dump_spr (uint32_t sprn)
{
    if (loglevel != 0) {
        fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
                sprn, sprn, env->spr[sprn]);
    }
}

/* Debug aid: log an SPR write (the new value is already in env->spr). */
void helper_store_dump_spr (uint32_t sprn)
{
    if (loglevel != 0) {
        fprintf(logfile, "Write SPR %d %03x <= " ADDRX "\n",
                sprn, sprn, env->spr[sprn]);
    }
}
90

    
91
/* Thin wrappers exposing the time-base facilities to translated code.
 * TBL/TBU are the lower/upper 32 bits of the time base, ATBL/ATBU the
 * alternate time base; the 601 RTC registers are handled separately. */
target_ulong helper_load_tbl (void)
{
    return cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

/* PowerPC 601 real-time clock, lower and upper halves. */
target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
120

    
121
#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
/* Write the Address Space Register (64-bit implementations only). */
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

/* Write SDR1 (hashed page table base and size). */
void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

/* Time-base writes: lower and upper 32 bits. */
void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

/* Alternate time-base writes: lower and upper 32 bits. */
void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

/* PowerPC 601 real-time clock writes. */
void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

/* Decrementer read/write. */
target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}
173

    
174
/* Write HID0 on the PowerPC 601. Bit 3 (0x8) of HID0 selects
 * little-endian mode on this CPU; when it changes, the cached
 * endianness flag in hflags/hflags_nmsr must be updated so that
 * translation picks up the new byte order. */
void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        /* Copy HID0 bit 3 into the MSR_LE position of the non-MSR flags */
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        if (loglevel != 0) {
            fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
                    __func__, val & 0x8 ? 'l' : 'b', env->hflags);
        }
    }
    /* HID0 is a 32-bit register even on 64-bit hosts */
    env->spr[SPR_HID0] = (uint32_t)val;
}
192

    
193
/* Write a PowerPC 403 protection bound register. A change in the
 * protection bounds can alter address translation, so the TLB is
 * flushed when the value actually changes. */
void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}
201

    
202
/* PowerPC 40x programmable interval timer access. */
target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

/* PowerPC 40x debug control register 0. */
void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

/* PowerPC 40x storage little-endian register. */
void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

/* BookE timer control / timer status registers. */
void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

/* Instruction/data BAT (block address translation) register writes. */
void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

/* PowerPC 601 unified BATs (601 has no split I/D BATs). */
void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif
262

    
263
/*****************************************************************************/
264
/* Memory load and stores */
265

    
266
/* Advance an effective address by 'arg' bytes, honouring the current
 * addressing mode: in 32-bit mode (MSR[SF] clear on a 64-bit CPU) the
 * result wraps to 32 bits, as the architecture requires. */
static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
        if (!msr_sf)
            return (uint32_t)(addr + arg);
        else
#endif
            return addr + arg;
}
275

    
276
/* Load Multiple Word: fill GPRs reg..31 from consecutive words starting
 * at addr. In little-endian mode each word is byte-swapped after the
 * load, matching the big-endian storage convention of lmw. */
void helper_lmw (target_ulong addr, uint32_t reg)
{
    while (reg < 32) {
        if (msr_le) {
            env->gpr[reg] = bswap32(ldl(addr));
        } else {
            env->gpr[reg] = ldl(addr);
        }
        addr = addr_add(addr, 4);
        reg++;
    }
}
286

    
287
/* Store Multiple Word: write GPRs reg..31 to consecutive words starting
 * at addr, byte-swapping each word first in little-endian mode. */
void helper_stmw (target_ulong addr, uint32_t reg)
{
    while (reg < 32) {
        uint32_t word = (uint32_t)env->gpr[reg];

        if (msr_le) {
            word = bswap32(word);
        }
        stl(addr, word);
        addr = addr_add(addr, 4);
        reg++;
    }
}
297

    
298
/* Load String Word: load nb bytes starting at addr into GPRs beginning
 * with reg (wrapping from r31 to r0). A final partial word is loaded
 * left-justified, with the remaining low-order bytes zeroed. */
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    /* Full 4-byte words first */
    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    /* Trailing 1..3 bytes, packed into the high-order end of the GPR */
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}
314
/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
/* Load String Word Indexed: like lsw but the byte count comes from
 * XER[BC]; a zero count is a no-op. Raises a program exception when
 * the destination range would overwrite rA (if nonzero) or rB. */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
332

    
333
/* Store String Word: store nb bytes from GPRs beginning with reg
 * (wrapping from r31 to r0) to memory at addr. A final partial word
 * is taken from the high-order bytes of the register. */
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    /* Full 4-byte words first */
    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    /* Trailing 1..3 bytes, taken from the high-order end of the GPR */
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}
348

    
349
/* Zero one data cache line containing addr, one 32-bit store at a time.
 * Also cancels a pending lwarx/ldarx reservation on that line, since
 * dcbz is a store for reservation purposes. */
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int ofs;

    addr &= ~(dcache_line_size - 1);
    for (ofs = 0; ofs < dcache_line_size; ofs += 4) {
        stl(addr + ofs, 0);
    }
    if (env->reserve == addr) {
        env->reserve = (target_ulong)-1ULL;
    }
}
359

    
360
/* dcbz: zero the data cache line of the current CPU's line size. */
void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

/* dcbz on the 970: HID5 bits 7..8 == 1 selects a 32-byte line
 * (legacy mode); otherwise the full cache line size is used. */
void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}
372

    
373
/* icbi: invalidate the instruction cache block containing addr, which
 * for TCG means invalidating any translated code in that range.
 * NOTE(review): the address is aligned with dcache_line_size but the
 * invalidation span uses icache_line_size — presumably the two are
 * equal on affected CPUs; confirm before changing either. */
void helper_icbi(target_ulong addr)
{
    uint32_t tmp;

    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    tmp = ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}
386

    
387
// XXX: to be tested
/* POWER lscbx: load up to XER[BC] bytes into GPRs starting at reg
 * (wrapping at r31->r0), packing 4 bytes per register from the most
 * significant byte down. Stops early when a byte equals XER[CMP].
 * ra (if nonzero) and rb are skipped rather than overwritten.
 * Returns the number of bytes actually processed (the caller stores
 * it back into XER[BC]). */
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;
    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            /* Insert byte c at bit offset d of the target GPR */
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            /* Register full: move on to the next one (mod 32) */
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
411

    
412
/*****************************************************************************/
413
/* Fixed point operations helpers */
414
#if defined(TARGET_PPC64)
415

    
416
/* multiply high word */
417
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
418
{
419
    uint64_t tl, th;
420

    
421
    muls64(&tl, &th, arg1, arg2);
422
    return th;
423
}
424

    
425
/* multiply high word unsigned */
426
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
427
{
428
    uint64_t tl, th;
429

    
430
    mulu64(&tl, &th, arg1, arg2);
431
    return th;
432
}
433

    
434
uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
435
{
436
    int64_t th;
437
    uint64_t tl;
438

    
439
    muls64(&tl, (uint64_t *)&th, arg1, arg2);
440
    /* If th != 0 && th != -1, then we had an overflow */
441
    if (likely((uint64_t)(th + 1) <= 1)) {
442
        env->xer &= ~(1 << XER_OV);
443
    } else {
444
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
445
    }
446
    return (int64_t)tl;
447
}
448
#endif
449

    
450
/* cntlzw: count leading zeros of the low 32 bits. */
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
/* cntlzd: count leading zeros of the full 64-bit value. */
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif
461

    
462
/* shift right arithmetic helper */
463
target_ulong helper_sraw (target_ulong value, target_ulong shift)
464
{
465
    int32_t ret;
466

    
467
    if (likely(!(shift & 0x20))) {
468
        if (likely((uint32_t)shift != 0)) {
469
            shift &= 0x1f;
470
            ret = (int32_t)value >> shift;
471
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
472
                env->xer &= ~(1 << XER_CA);
473
            } else {
474
                env->xer |= (1 << XER_CA);
475
            }
476
        } else {
477
            ret = (int32_t)value;
478
            env->xer &= ~(1 << XER_CA);
479
        }
480
    } else {
481
        ret = (int32_t)value >> 31;
482
        if (ret) {
483
            env->xer |= (1 << XER_CA);
484
        } else {
485
            env->xer &= ~(1 << XER_CA);
486
        }
487
    }
488
    return (target_long)ret;
489
}
490

    
491
#if defined(TARGET_PPC64)
/* srad: 64-bit arithmetic shift right. Returns the sign-extended
 * shifted value and sets XER[CA] when the (negative) source had any
 * 1-bits shifted out; clears it otherwise. A shift amount with bit
 * 0x40 set yields all sign bits. */
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* The shifted-out-bits mask must be built with a 64-bit
             * constant: with a plain int, 1 << shift is undefined for
             * shift >= 32 and produced a truncated mask, corrupting the
             * carry computation for shift amounts of 32..63. */
            if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            /* Zero shift: no bits lost, carry always clear */
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        /* Shift amount >= 64: result is a copy of the sign bit; carry is
         * set iff the value was negative (some 1-bit was shifted out). */
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif
520

    
521
/* popcntb: population count per byte — after the three folding steps
 * each byte of the result holds the number of 1-bits that were in the
 * corresponding byte of the input (32-bit variant). */
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
/* popcntb, 64-bit variant: per-byte population count over 8 bytes. */
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif
538

    
539
/*****************************************************************************/
540
/* Floating point operations helpers */
541
/* Widen a single-precision value (raw 32-bit image) to the raw 64-bit
 * image of its double-precision equivalent, via softfloat. */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

/* Narrow a double-precision value (raw 64-bit image) to the raw 32-bit
 * image of its single-precision equivalent; may round and set flags in
 * env->fp_status. */
uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}
558

    
559
/* True when the double has a biased exponent of zero. Note this also
 * matches +/-0; callers (helper_compute_fprf) test for zero first, so
 * in practice this identifies denormalized numbers. */
static always_inline int isden (float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}
567

    
568
/* Classify a double-precision value into the 5-bit FPRF code
 * (C + FPCC) defined by the PowerPC architecture: QNaN 0x11,
 * -inf 0x09, +inf 0x05, -zero 0x12, +zero 0x02, denormals 0x10|sign,
 * normals 0x00|sign (sign: 0x08 negative, 0x04 positive).
 * When set_fprf is nonzero the FPSCR[FPRF] field is updated too.
 * Returns only the low 4 bits (FPCC), used to set CR1. */
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
619

    
620
/* Floating-point invalid operations exception */
/* Record an invalid-operation exception of kind 'op' in the FPSCR and,
 * when invalid-operation exceptions are enabled (VE) and FP exceptions
 * are not fully disabled in the MSR, raise a program exception.
 * Returns the value to place in the target FPR when the exception is
 * disabled: the default quiet NaN for arithmetic ops, 0 otherwise. */
static always_inline uint64_t fload_invalid_op_excp (int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred: raised later by float_check_status */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
701

    
702
/* Record a zero-divide exception (ZX); raise a program exception when
 * ZE is set and FP exceptions are enabled in the MSR. */
static always_inline void float_zero_divide_excp (void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

/* Record an overflow exception (OX). When OE is set the exception is
 * deferred (index/error code stored, raised by float_check_status);
 * otherwise the inexact flags XX/FI are set as well. */
static always_inline void float_overflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

/* Record an underflow exception (UX); deferred like overflow when UE
 * is set. */
static always_inline void float_underflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

/* Record an inexact-result exception (XX); deferred like overflow when
 * XE is set. */
static always_inline void float_inexact_excp (void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
764

    
765
/* Propagate the FPSCR[RN] rounding-mode field into the softfloat
 * status so subsequent FP operations round accordingly. */
static always_inline void fpscr_set_rounding_mode (void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinite */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinite */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
791

    
792
/* mtfsb0: clear one FPSCR bit. If a rounding-mode bit (RN/RN1) was
 * actually cleared, resynchronize the softfloat rounding mode. */
void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}
809

    
810
/* mtfsb1: set one FPSCR bit and apply its side effects. Setting an
 * exception bit also sets FX and, when the matching enable bit is on,
 * records a deferred program exception; setting an enable bit when the
 * matching exception bit is already on does the same; setting RN/RN1
 * resynchronizes the softfloat rounding mode. Side effects only apply
 * when the bit actually transitions from 0 to 1. */
void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            /* Fix: the original fell through into the FPSCR_OX case,
             * wrongly raising an overflow exception when VX was set
             * with OE enabled. */
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            /* Any specific invalid-operation bit also sets the VX summary */
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                /* Collect every pending invalid-operation cause */
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception, so the
             * exception is deferred: only the index/error code are set here
             * and float_check_status raises it later. */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
924

    
925
/* mtfsf: write the FPSCR nibbles selected by 'mask' from the low 32
 * bits of 'arg', then recompute the VX and FEX summary bits and the
 * softfloat rounding mode. FEX/VX themselves (bits 0x60000000) are
 * never taken from the source value. A pending enabled exception is
 * recorded as a deferred program exception. */
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    /* FEX and VX are summary bits: keep the current ones */
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    }
    else
        env->fpscr &= ~(1 << FPSCR_FEX);
    fpscr_set_rounding_mode();
}
958

    
959
/* Called after an FP operation has updated its target FPR: raise any
 * deferred program exception, or (with softfloat) translate the
 * accumulated softfloat flags into FPSCR exception updates. */
void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}
988

    
989
#ifdef CONFIG_SOFTFLOAT
990
void helper_reset_fpstatus (void)
991
{
992
    set_float_exception_flags(0, &env->fp_status);
993
}
994
#endif
995

    
996
/* fadd - fadd. */
/* Double-precision addition on raw 64-bit FPR images. With precise
 * emulation, sNaN operands and inf + (-inf) are turned into the proper
 * invalid-operation exceptions before the softfloat add. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                      float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
1020

    
1021
/* fsub - fsub. */
/* Double-precision subtract. In precise mode, raises VXSNAN for sNaN
 * operands and VXISI for inf - inf of like sign. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
{
    /* NOTE: this extra brace pair is redundant but harmless. */
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN subtraction */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                      float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }
}
#else
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
1047

    
1048
/* fmul - fmul. */
/* Double-precision multiply. In precise mode, raises VXSNAN for sNaN
 * operands and VXIMZ for 0 * inf (either order). */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
1072

    
1073
/* fdiv - fdiv. */
/* Double-precision divide. In precise mode, raises VXSNAN for sNaN
 * operands, VXIDI for inf/inf and VXZDZ for 0/0. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
1099

    
1100
/* fabs */
1101
uint64_t helper_fabs (uint64_t arg)
1102
{
1103
    CPU_DoubleU farg;
1104

    
1105
    farg.ll = arg;
1106
    farg.d = float64_abs(farg.d);
1107
    return farg.ll;
1108
}
1109

    
1110
/* fnabs */
1111
uint64_t helper_fnabs (uint64_t arg)
1112
{
1113
    CPU_DoubleU farg;
1114

    
1115
    farg.ll = arg;
1116
    farg.d = float64_abs(farg.d);
1117
    farg.d = float64_chs(farg.d);
1118
    return farg.ll;
1119
}
1120

    
1121
/* fneg */
1122
uint64_t helper_fneg (uint64_t arg)
1123
{
1124
    CPU_DoubleU farg;
1125

    
1126
    farg.ll = arg;
1127
    farg.d = float64_chs(farg.d);
1128
    return farg.ll;
1129
}
1130

    
1131
/* fctiw - fctiw. */
/* Convert double to 32-bit signed int using the current rounding mode.
 * sNaN raises VXSNAN|VXCVI; qNaN/inf raises VXCVI. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}
1154

    
1155
/* fctiwz - fctiwz. */
/* Convert double to 32-bit signed int, rounding toward zero.
 * Same invalid-operation handling as fctiw. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}
1178

    
1179
#if defined(TARGET_PPC64)
1180
/* fcfid - fcfid. */
/* Convert 64-bit signed integer to double (current rounding mode). */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}
1187

    
1188
/* fctid - fctid. */
/* Convert double to 64-bit signed int using the current rounding mode;
 * invalid-operation handling mirrors fctiw. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1205

    
1206
/* fctidz - fctidz. */
/* Convert double to 64-bit signed int, rounding toward zero. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1223

    
1224
#endif
1225

    
1226
/* Common implementation of the frin/friz/frip/frim round-to-integral
 * family: round with the given mode, then restore the FPSCR rounding mode.
 * sNaN raises VXSNAN|VXCVI; qNaN/inf raises VXCVI. */
static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        /* temporarily override the rounding mode for this one operation */
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}
1245

    
1246
/* frin: round to integral, ties to even. */
uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

/* friz: round to integral toward zero. */
uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

/* frip: round to integral toward +infinity. */
uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

/* frim: round to integral toward -infinity. */
uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
1265

    
1266
/* fmadd - fmadd. */
/* Fused multiply-add (arg1 * arg2) + arg3. In precise mode with FLOAT128
 * the product is kept in 128-bit precision before the add, as the PowerPC
 * architecture specifies (single rounding). */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }
#else
    /* imprecise: two separately rounded softfloat operations */
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}
1312

    
1313
/* fmsub - fmsub. */
/* Fused multiply-subtract (arg1 * arg2) - arg3; see helper_fmadd for the
 * precise-emulation structure. VXISI case here is same-signed infinities. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
#else
    /* imprecise: two separately rounded softfloat operations */
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}
1359

    
1360
/* fnmadd - fnmadd. */
/* Negated fused multiply-add: -((arg1 * arg2) + arg3); the final negation
 * is skipped for NaN results so the NaN payload is propagated unchanged. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
        /* negate the result unless it is a NaN */
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
1409

    
1410
/* fnmsub - fnmsub. */
/* Negated fused multiply-subtract: -((arg1 * arg2) - arg3); NaN results
 * are not negated, propagating the payload unchanged. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
        /* negate the result unless it is a NaN */
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
1459

    
1460
/* frsp - frsp. */
/* Round double to single precision (via a float32 round-trip). */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN rounding to single precision */
       farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
       f32 = float64_to_float32(farg.d, &env->fp_status);
       farg.d = float32_to_float64(f32, &env->fp_status);
    }
#else
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);
#endif
    return farg.ll;
}
1481

    
1482
/* fsqrt - fsqrt. */
/* Double-precision square root. VXSNAN for sNaN input, VXSQRT for a
 * negative nonzero operand (sqrt(-0.0) is valid and returns -0.0). */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1499

    
1500
/* fre - fre. */
1501
uint64_t helper_fre (uint64_t arg)
1502
{
1503
    CPU_DoubleU fone, farg;
1504
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
1505
    farg.ll = arg;
1506

    
1507
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1508
        /* sNaN reciprocal */
1509
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1510
    } else {
1511
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
1512
    }
1513
    return farg.d;
1514
}
1515

    
1516
/* fres - fres. */
/* Single-precision reciprocal estimate: 1.0 / arg rounded through
 * float32 to get single-precision accuracy. VXSNAN for sNaN input. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU fone, farg;
    float32 f32;
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
1534

    
1535
/* frsqrte  - frsqrte. */
/* Reciprocal square root estimate: 1.0 / sqrt(arg), rounded through
 * float32. VXSNAN for sNaN, VXSQRT for negative nonzero input. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU fone, farg;
    float32 f32;
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
1557

    
1558
/* fsel - fsel. */
1559
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1560
{
1561
    CPU_DoubleU farg1;
1562

    
1563
    farg1.ll = arg1;
1564

    
1565
    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
1566
        return arg2;
1567
    else
1568
        return arg3;
1569
}
1570

    
1571
/* fcmpu: unordered FP compare. Sets CR field crfD and FPSCR[FPRF] to
 * one of LT(0x8)/GT(0x4)/EQ(0x2)/UN(0x1); only sNaN operands raise
 * VXSNAN (no VXVC, unlike fcmpo). */
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}
1599

    
1600
/* fcmpo: ordered FP compare. Same CR/FPRF encoding as fcmpu, but any
 * NaN operand raises VXVC (plus VXSNAN when it is a signalling NaN). */
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
1633

    
1634
#if !defined (CONFIG_USER_ONLY)
1635
/* mtmsr: store MSR via hreg_store_msr(); a nonzero return is an
 * exception number to raise after forcing a TB exit. */
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}
1643

    
1644
/* Common return-from-interrupt: load NIP and (masked) MSR from the
 * given values. On 64-bit targets, when MSR[SF] is clear only the low
 * 32 bits are used and keep_msrh optionally preserves the current MSR
 * high half. The low 2 NIP bits are always cleared. */
static always_inline void do_rfi (target_ulong nip, target_ulong msr,
                                    target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
1672

    
1673
/* rfi: return from interrupt using SRR0/SRR1, keeping the MSR high half. */
void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0xFFFF0000), 1);
}
1678

    
1679
#if defined(TARGET_PPC64)
1680
/* rfid: 64-bit return from interrupt using SRR0/SRR1. */
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0xFFFF0000), 0);
}
1685

    
1686
/* hrfid: hypervisor return from interrupt using HSRR0/HSRR1. */
void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0xFFFF0000), 0);
}
1691
#endif
1692
#endif
1693

    
1694
/* tw: 32-bit trap-word. Raise a TRAP program exception when any
 * condition selected by the TO field (flags) holds. */
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    int32_t sa = (int32_t)arg1, sb = (int32_t)arg2;
    uint32_t ua = (uint32_t)arg1, ub = (uint32_t)arg2;
    int trap = (sa <  sb && (flags & 0x10)) ||
               (sa >  sb && (flags & 0x08)) ||
               (sa == sb && (flags & 0x04)) ||
               (ua <  ub && (flags & 0x02)) ||
               (ua >  ub && (flags & 0x01));

    if (unlikely(trap)) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1704

    
1705
#if defined(TARGET_PPC64)
1706
/* td: 64-bit trap-doubleword; same TO-field semantics as helper_tw. */
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    int64_t sa = (int64_t)arg1, sb = (int64_t)arg2;
    uint64_t ua = (uint64_t)arg1, ub = (uint64_t)arg2;
    int trap = (sa <  sb && (flags & 0x10)) ||
               (sa >  sb && (flags & 0x08)) ||
               (sa == sb && (flags & 0x04)) ||
               (ua <  ub && (flags & 0x02)) ||
               (ua >  ub && (flags & 0x01));

    if (unlikely(trap)) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1715
#endif
1716

    
1717
/*****************************************************************************/
1718
/* PowerPC 601 specific instructions (POWER bridge) */
1719

    
1720
/* clcs (POWER/601): cache line compute size for the given selector. */
target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
    default:
        /* Undefined selector */
        return 0;
    }
}
1747

    
1748
/* div (POWER/601): divide the 64-bit value (arg1:MQ) by arg2; quotient
 * is returned, remainder goes to MQ. Overflow/÷0 yields INT32_MIN, MQ=0. */
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        /* NOTE(review): tmp % arg2 is an *unsigned* 64-bit modulo while the
         * quotient uses (int32_t)arg2 — verify this is intended for
         * negative divisors. */
        env->spr[SPR_MQ] = tmp % arg2;
        return  tmp / (int32_t)arg2;
    }
}
1761

    
1762
/* divo: as helper_div but also sets XER[OV,SO] on overflow and clears
 * OV when the quotient fits in 32 bits. */
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        /* NOTE(review): unsigned modulo vs signed quotient — see helper_div */
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            /* quotient does not fit in 32 bits */
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}
1782

    
1783
/* divs (POWER/601): 32-bit signed divide; remainder to MQ.
 * Overflow/÷0 yields INT32_MIN, MQ=0. */
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
1794

    
1795
/* divso: as helper_divs but sets XER[OV,SO] on overflow/÷0, and clears
 * OV on a successful divide. */
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
1808

    
1809
#if !defined (CONFIG_USER_ONLY)
1810
/* rac (POWER): translate effective address to real address, temporarily
 * disabling BAT translation. Returns 0 when translation fails. */
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;  /* restore BAT count */
    return ret;
}
1827

    
1828
/* rfsvc (POWER): return from service call using LR as NIP and CTR as MSR. */
void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
1832
#endif
1833

    
1834
/*****************************************************************************/
1835
/* 602 specific instructions */
1836
/* mfrom is the most crazy instruction ever seen, imho ! */
1837
/* Real implementation uses a ROM table. Do the same */
1838
/* Extremly decomposed:
1839
 *                      -arg / 256
1840
 * return 256 * log10(10           + 1.0) + 0.5
1841
 */
1842
#if !defined (CONFIG_USER_ONLY)
1843
/* mfrom (602): table lookup of 256 * log10(10^(-arg/256) + 1.0) + 0.5,
 * mirroring the hardware ROM; out-of-range inputs return 0. */
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
1852
#endif
1853

    
1854
/*****************************************************************************/
1855
/* Embedded PowerPC specific helpers */
1856

    
1857
/* XXX: to be improved to check access rights when in user-mode */
1858
/* XXX: to be improved to check access rights when in user-mode */
/* Read a Device Control Register; raises a program exception when no
 * DCR environment exists or the read fails. */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    target_ulong val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        if (loglevel != 0) {
            fprintf(logfile, "No DCR environment\n");
        }
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
        if (loglevel != 0) {
            fprintf(logfile, "DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
        }
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}
1877

    
1878
/* Write 'val' to Device Control Register 'dcrn'.  Raises a program
 * exception when there is no DCR environment or the write fails. */
void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        if (loglevel != 0) {
            fprintf(logfile, "No DCR environment\n");
        }
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
        if (loglevel != 0) {
            fprintf(logfile, "DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
        }
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
1894

    
1895
#if !defined(CONFIG_USER_ONLY)
1896
void helper_40x_rfci (void)
1897
{
1898
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1899
           ~((target_ulong)0xFFFF0000), 0);
1900
}
1901

    
1902
void helper_rfci (void)
1903
{
1904
    do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1905
           ~((target_ulong)0x3FFF0000), 0);
1906
}
1907

    
1908
void helper_rfdi (void)
1909
{
1910
    do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1911
           ~((target_ulong)0x3FFF0000), 0);
1912
}
1913

    
1914
void helper_rfmci (void)
1915
{
1916
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1917
           ~((target_ulong)0x3FFF0000), 0);
1918
}
1919
#endif
1920

    
1921
/* 440 specific */
1922
/* dlmzb (440): determine leftmost zero byte in the 8-byte value high:low.
 * Writes the 1-based byte counter into XER[0:6] and returns it; when
 * update_Rc is set, CR0 records which word held the zero byte (or that
 * none was found) plus XER[SO].
 * NOTE(review): with no zero byte the counter reaches 9 — confirm
 * against the 440 dlmzb specification. */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    /* Scan the four bytes of 'high', most-significant byte first. */
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;  /* zero byte found in 'high' */
            }
            goto done;
        }
        i++;
    }
    /* Then the four bytes of 'low'. */
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;  /* zero byte found in 'low' */
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;  /* no zero byte present */
    }
 done:
    /* Byte counter goes into the low 7 bits of XER. */
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
1956

    
1957
/*****************************************************************************/
1958
/* Altivec extension helpers */
1959
/* Host-order index of the most-significant (HI_IDX) and
 * least-significant (LO_IDX) 64-bit half of a ppc_avr_t. */
#if defined(WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif
1966

    
1967
/* Iterate over the elements of vector register 'r' in PowerPC
 * (big-endian) element order, regardless of host byte order. */
#if defined(WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif
1974

    
1975
/* Saturating arithmetic helpers.  */
1976
/* Saturating conversion between element types: clamp x to [min, max]
 * (only the bounds enabled by use_min/use_max are checked) and flag
 * saturation through *sat for a later VSCR[SAT] update. */
#define SATCVT(from, to, from_type, to_type, min, max, use_min, use_max) \
    static always_inline to_type cvt##from##to (from_type x, int *sat)  \
    {                                                                   \
        to_type r;                                                      \
        if (use_min && x < min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (use_max && x > max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX, 1, 1)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX, 1, 1)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX, 1, 1)
SATCVT(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX, 0, 1)
SATCVT(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX, 0, 1)
SATCVT(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX, 0, 1)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX, 1, 1)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX, 1, 1)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX, 1, 1)
#undef SATCVT
2001

    
2002
/* Load-vector-element helpers (lvebx/lvehx/lvewx): load a single
 * element from memory into the element of 'r' addressed by the low
 * four address bits.  'swap' is applied in little-endian mode
 * (msr_le); the other elements of 'r' are left unchanged. */
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if(msr_le) {                                            \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
#define I(x) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE
2021

    
2022
/* lvsl: build the permute control vector for an unaligned load:
 * consecutive byte values starting at sh & 0xf. */
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int val = sh & 0xf;

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = val;
        val++;
    }
}
2030

    
2031
/* lvsr: build the permute control vector for an unaligned store:
 * consecutive byte values starting at 16 - (sh & 0xf). */
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int val = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = val;
        val++;
    }
}
2039

    
2040
#define STVE(name, access, swap, element)                       \
2041
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
2042
    {                                                           \
2043
        size_t n_elems = ARRAY_SIZE(r->element);                \
2044
        int adjust = HI_IDX*(n_elems-1);                        \
2045
        int sh = sizeof(r->element[0]) >> 1;                    \
2046
        int index = (addr & 0xf) >> sh;                         \
2047
        if(msr_le) {                                            \
2048
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2049
        } else {                                                        \
2050
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2051
        }                                                               \
2052
    }
2053
#define I(x) (x)
2054
STVE(stvebx, stb, I, u8)
2055
STVE(stvehx, stw, bswap16, u16)
2056
STVE(stvewx, stl, bswap32, u32)
2057
#undef I
2058
#undef LVE
2059

    
2060
/* vaddcuw: per-word carry-out of the unsigned add a + b.
 * The add overflows 32 bits exactly when b > ~a. */
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int n;

    for (n = 0; n < ARRAY_SIZE(r->u32); n++) {
        uint32_t inv = ~a->u32[n];

        r->u32[n] = (inv < b->u32[n]) ? 1 : 0;
    }
}
2067

    
2068
/* Modulo (non-saturating) element-wise add and subtract. */
#define VARITH_DO(name, op, element)        \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
{                                                                       \
    int i;                                                              \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
        r->element[i] = a->element[i] op b->element[i];                 \
    }                                                                   \
}
#define VARITH(suffix, element)                  \
  VARITH_DO(add##suffix, +, element)             \
  VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH
2084

    
2085
/* Saturating element-wise add/subtract: compute in a wider type
 * (optype), clamp through the matching cvt* helper, and set
 * VSCR[SAT] when anything saturated.
 * NOTE(review): the switch over element size is degenerate — all
 * three cases expand identically; kept as-is. */
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)    \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)       \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)     \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
2123

    
2124
/* Rounded element-wise average: (a + b + 1) >> 1, computed in a
 * wider type (etype) so the intermediate sum cannot overflow. */
#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
2142

    
2143
/* Element-wise compare producing all-ones/all-zeros masks.  The
 * "record" (dot) forms set CR6 bit 3 when every element compared
 * true ('all' stays non-zero) and bit 1 when every element compared
 * false ('none' stays zero). */
#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
            switch (sizeof (a->element[0])) {                           \
            case 4: r->u32[i] = result; break;                          \
            case 2: r->u16[i] = result; break;                          \
            case 1: r->u8[i] = result; break;                           \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP
2178

    
2179
/* vmhaddshs: multiply-high-and-add, saturating.  Per halfword:
 * saturate((a * b >> 15) + c); VSCR[SAT] is set when anything
 * saturated. */
void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2194

    
2195
/* vmhraddshs: like vmhaddshs but rounding — 0x4000 is added to the
 * product before the 15-bit shift. */
void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2210

    
2211
/* Element-wise min/max: min is instantiated with '>' and max with
 * '<', so b is chosen exactly when the comparison holds. */
#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
  VMINMAX_DO(min##suffix, >, element)           \
  VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX
2234

    
2235
/* vmladduhm: per-halfword multiply-low then add, modulo 2^16. */
void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int n;

    for (n = 0; n < ARRAY_SIZE(r->s16); n++) {
        int32_t p = a->s16[n] * b->s16[n];

        r->s16[n] = (int16_t)(p + c->s16[n]);
    }
}
2243

    
2244
/* Vector merge high/low: interleave the elements of a and b.
 * NOTE(review): mrgl* is instantiated with MRGHI and mrgh* with
 * MRGLO — the cross-wiring together with the highp branches
 * compensates for host element layout.  Confirm element order on a
 * big-endian host before modifying. */
#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        for (i = 0; i < n_elems/2; i++) {                               \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
  VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO
2278

    
2279
/* vmsummbm: multiply signed bytes of a by unsigned bytes of b, then
 * sum each group of four products into the matching word of c. */
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}
2292

    
2293
/* vmsumshm: signed halfword multiply, sum pairs of products into the
 * matching word of c (modulo arithmetic, no saturation). */
void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
    }
}
2306

    
2307
/* vmsumshs: like vmsumshm but the per-word accumulation is done in
 * 64 bits and saturated to int32, setting VSCR[SAT] on overflow. */
void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2326

    
2327
/* vmsumubm: unsigned byte multiply, sum each group of four products
 * into the matching word of c (modulo arithmetic). */
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}
2340

    
2341
/* vmsumuhm: unsigned halfword multiply, sum pairs of products into
 * the matching word of c (modulo arithmetic). */
void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
    }
}
}
2354

    
2355
/* vmsumuhs: like vmsumuhm but the per-word accumulation is done in
 * 64 bits and saturated to uint32, setting VSCR[SAT] on overflow. */
void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2374

    
2375
/* Even/odd element multiply producing double-width products.
 * Even/odd is in PPC element numbering; HI_IDX/LO_IDX map that onto
 * the host layout. */
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element) \
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
2396

    
2397
/* vperm: each result byte is selected from the 32-byte concatenation
 * a:b by the low five bits of the corresponding byte of c; bit 0x10
 * selects b.  The index is mirrored on little-endian hosts. */
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;
    VECTOR_FOR_INORDER_I (i, u8) {
        int s = c->u8[i] & 0x1f;
#if defined(WORDS_BIGENDIAN)
        int index = s & 0xf;
#else
        int index = 15 - (s & 0xf);
#endif
        if (s & 0x10) {
            result.u8[i] = b->u8[index];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
    /* Build into a temporary so r may alias a, b or c. */
    *r = result;
}
2416

    
2417
/* PKBIG: 1 when host order already matches PPC pack order. */
#if defined(WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
/* vpkpx: pack eight 32-bit pixels (from a then b, in PPC order) into
 * eight 16-bit pixels, keeping the high bits of each 8-bit channel
 * (1:5:5:5-style packing — see the vpkpx specification). */
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I (i, u64) {
        VECTOR_FOR_INORDER_I (j, u32){
            uint32_t e = x[i]->u32[j];
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    /* Build into a temporary so r may alias a or b. */
    *r = result;
}
2442

    
2443
/* Vector pack: narrow the elements of a (high half of the result)
 * and b (low half) through 'cvt', optionally saturating; dosat forms
 * update VSCR[SAT]. */
#define VPK(suffix, from, to, cvt, dosat)       \
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
        VECTOR_FOR_INORDER_I (i, from) {                                \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG
2472

    
2473
#define VROTATE(suffix, element)                                        \
2474
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2475
    {                                                                   \
2476
        int i;                                                          \
2477
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2478
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2479
            unsigned int shift = b->element[i] & mask;                  \
2480
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2481
        }                                                               \
2482
    }
2483
VROTATE(b, u8)
2484
VROTATE(h, u16)
2485
VROTATE(w, u32)
2486
#undef VROTATE
2487

    
2488
/* vsel: bitwise select — mask bits set in c take b, clear bits take a. */
void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < 2; i++) {
        uint64_t sel = c->u64[i];

        r->u64[i] = (b->u64[i] & sel) | (a->u64[i] & ~sel);
    }
}
2493

    
2494
#if defined(WORDS_BIGENDIAN)
2495
#define LEFT 0
2496
#define RIGHT 1
2497
#else
2498
#define LEFT 1
2499
#define RIGHT 0
2500
#endif
2501
/* The specification says that the results are undefined if all of the
2502
 * shift counts are not identical.  We check to make sure that they are
2503
 * to conform to what real hardware appears to do.  */
2504
#define VSHIFT(suffix, leftp)                                           \
2505
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
2506
    {                                                                   \
2507
        int shift = b->u8[LO_IDX*0x15] & 0x7;                           \
2508
        int doit = 1;                                                   \
2509
        int i;                                                          \
2510
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
2511
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
2512
        }                                                               \
2513
        if (doit) {                                                     \
2514
            if (shift == 0) {                                           \
2515
                *r = *a;                                                \
2516
            } else if (leftp) {                                         \
2517
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
2518
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
2519
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
2520
            } else {                                                    \
2521
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
2522
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
2523
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
2524
            }                                                           \
2525
        }                                                               \
2526
    }
2527
VSHIFT(l, LEFT)
2528
VSHIFT(r, RIGHT)
2529
#undef VSHIFT
2530
#undef LEFT
2531
#undef RIGHT
2532

    
2533
/* Element-wise logical shift left; the count is masked to the
 * element width, so the shift is always well defined. */
#define VSL(suffix, element)                                            \
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL
2547

    
2548
/* vsldoi: concatenate a:b and extract 16 bytes starting at byte
 * offset 'shift' (0..15) in PPC byte numbering.  The little-endian
 * branch mirrors the indexing to match host element layout. */
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

#if defined(WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index-0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index-0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    /* Build into a temporary so r may alias a or b. */
    *r = result;
}
2575

    
2576
/* vslo: shift the whole register left by sh octets, zero-filling.
 * sh comes from bits 3:6 of the last byte of b. */
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (WORDS_BIGENDIAN)
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#else
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#endif
}
2588

    
2589
/* Experimental testing shows that hardware masks the immediate.  */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
#endif
/* vsplt*: replicate the element of b selected by the (masked)
 * immediate into every element of r. */
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED
2611

    
2612
/* vspltis*: splat a sign-extended 5-bit immediate into every
 * element; "(int8_t)(splat << 3) >> 3" performs the sign extension. */
#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI
2625

    
2626
/* Per-element right shifts.  The shift count for each element is taken
   from the low bits of the corresponding element of b, masked to the
   element width (mask = 7/15/31 for 1/2/4-byte elements).  Signed
   element types (vsrab/vsrah/vsraw) give an arithmetic shift, unsigned
   ones (vsrb/vsrh/vsrw) a logical shift.  */
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR
2643

    
2644
/* vsro: shift the whole 128-bit register right by a whole number of
   bytes.  The byte count comes from bits of the lowest-order byte of b
   (u8[LO_IDX*0xf] picks that byte regardless of host endianness),
   bits 3:0 after discarding the low 3 bits.  Vacated bytes are zeroed.  */
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (WORDS_BIGENDIAN)
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#else
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#endif
}
2656

    
2657
/* vsubcuw: per-word carry-out of a - b.  Each result word is 1 when
   the subtraction produces no borrow (a >= b), otherwise 0.  */
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int idx;

    for (idx = 0; idx < ARRAY_SIZE(r->u32); idx++) {
        r->u32[idx] = (a->u32[idx] >= b->u32[idx]) ? 1 : 0;
    }
}
2664

    
2665
/* vsumsws: sum all four signed words of a plus the last word of b,
   saturate to 32 bits, place the result in the last word of r and zero
   the others.  "Last" means the lowest-order word, whose index depends
   on host endianness.  Sets VSCR[SAT] on saturation.  */
void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    /* cvtsdsw saturates the 64-bit sum to int32 and flags overflow.  */
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2689

    
2690
/* vsum2sws: for each 64-bit half, sum the two signed words of a in that
   half plus the odd word of b, saturating to 32 bits; the result goes
   in the odd word of each half, the even word is zeroed.  The inner
   loop bound ARRAY_SIZE(r->u64) == 2 is the number of words per half.
   Sets VSCR[SAT] on saturation.  */
void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper+i*2];
        result.u64[i] = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2*i+j];
        }
        result.s32[upper+i*2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2715

    
2716
/* vsum4sbs: for each word, sum the four signed bytes of a in that word
   plus the corresponding signed word of b, saturated to 32 bits.
   ARRAY_SIZE(r->s32) == 4 doubles as the bytes-per-word count.
   Sets VSCR[SAT] on saturation.  */
void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4*i+j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2733

    
2734
/* vsum4shs: for each word, sum the two signed halfwords of a in that
   word plus the corresponding signed word of b, saturated to 32 bits.
   Sets VSCR[SAT] on saturation.  */
void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        t += a->s16[2*i] + a->s16[2*i+1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2749

    
2750
/* vsum4ubs: unsigned counterpart of vsum4sbs — for each word, sum the
   four unsigned bytes of a in that word plus the corresponding unsigned
   word of b, saturated to 32 bits via cvtuduw.  Sets VSCR[SAT] on
   saturation.  */
void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4*i+j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2767

    
2768
#if defined(WORDS_BIGENDIAN)
2769
#define UPKHI 1
2770
#define UPKLO 0
2771
#else
2772
#define UPKHI 0
2773
#define UPKLO 1
2774
#endif
2775
#define VUPKPX(suffix, hi)                                      \
2776
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
2777
    {                                                           \
2778
        int i;                                                  \
2779
        ppc_avr_t result;                                       \
2780
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
2781
            uint16_t e = b->u16[hi ? i : i+4];                  \
2782
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
2783
            uint8_t r = (e >> 10) & 0x1f;                       \
2784
            uint8_t g = (e >> 5) & 0x1f;                        \
2785
            uint8_t b = e & 0x1f;                               \
2786
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
2787
        }                                                               \
2788
        *r = result;                                                    \
2789
    }
2790
VUPKPX(lpx, UPKLO)
2791
VUPKPX(hpx, UPKHI)
2792
#undef VUPKPX
2793

    
2794
/* vupkhsb/vupkhsh/vupklsb/vupklsh: sign-extend the high (hi != 0) or
   low half of the packed source elements into wider result elements.
   A temporary is used because r and b may alias.  */
#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO
2817

    
2818
#undef VECTOR_FOR_INORDER_I
2819
#undef HI_IDX
2820
#undef LO_IDX
2821

    
2822
/*****************************************************************************/
2823
/* SPE extension helpers */
2824
/* Use a table to make this quicker */
2825
/* Nibble bit-reversal lookup: hbrev[n] is n with its 4 bits reversed.
   Read-only, so declare it const.  */
static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};
2829

    
2830
/* Reverse the bit order of an 8-bit value using the nibble table.  */
static always_inline uint8_t byte_reverse (uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}
2834

    
2835
/* Reverse the bit order of a 32-bit value, byte by byte.  */
static always_inline uint32_t word_reverse (uint32_t val)
{
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
}
2840

    
2841
#define MASKBITS 16 // Random value - to be fixed (implementation dependent)
/* SPE brinc: bit-reversed increment.  Increment the bit-reversed image
   of the masked low bits of arg1 (the "index"), using arg2 as the mask,
   and recombine with the unmasked high bits.  Used for FFT-style
   bit-reversed addressing.  */
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    /* Reverse, add one, reverse back: a bit-reversed increment.  */
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}
2852

    
2853
/* Count leading sign bits: leading ones for a negative value,
   leading zeroes otherwise.  */
uint32_t helper_cntlsw32 (uint32_t val)
{
    return (val & 0x80000000) ? clz32(~val) : clz32(val);
}
2860

    
2861
/* Count leading zeroes of a 32-bit value.  */
uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}
2865

    
2866
/* Single-precision floating-point conversions */
2867
/* Convert signed 32-bit integer to single precision.  */
static always_inline uint32_t efscfsi (uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->spe_status);

    return u.l;
}

/* Convert unsigned 32-bit integer to single precision.  */
static always_inline uint32_t efscfui (uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->spe_status);

    return u.l;
}

/* Convert single precision to signed 32-bit integer (current rounding
   mode).  SPE returns 0 for NaN inputs instead of the IEEE default.  */
static always_inline int32_t efsctsi (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_int32(u.f, &env->spe_status);
}

/* Convert single precision to unsigned 32-bit integer (current rounding
   mode); NaN yields 0.  */
static always_inline uint32_t efsctui (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_uint32(u.f, &env->spe_status);
}

/* Convert single precision to signed 32-bit integer, rounding toward
   zero; NaN yields 0.  */
static always_inline uint32_t efsctsiz (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_int32_round_to_zero(u.f, &env->spe_status);
}

/* Convert single precision to unsigned 32-bit integer, rounding toward
   zero; NaN yields 0.  */
static always_inline uint32_t efsctuiz (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
}

/* Convert signed 32-bit fractional (value / 2^32) to single
   precision.  */
static always_inline uint32_t efscfsf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->spe_status);
    tmp = int64_to_float32(1ULL << 32, &env->spe_status);
    u.f = float32_div(u.f, tmp, &env->spe_status);

    return u.l;
}

/* Convert unsigned 32-bit fractional (value / 2^32) to single
   precision.  */
static always_inline uint32_t efscfuf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->spe_status);
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
    u.f = float32_div(u.f, tmp, &env->spe_status);

    return u.l;
}

/* Convert single precision to signed 32-bit fractional: scale by 2^32
   then convert to integer.  NaN yields 0.  */
static always_inline uint32_t efsctsf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
    u.f = float32_mul(u.f, tmp, &env->spe_status);

    return float32_to_int32(u.f, &env->spe_status);
}

/* Convert single precision to unsigned 32-bit fractional; NaN yields
   0.  */
static always_inline uint32_t efsctuf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
    u.f = float32_mul(u.f, tmp, &env->spe_status);

    return float32_to_uint32(u.f, &env->spe_status);
}
2986

    
2987
/* Expose the scalar single-precision conversions above as TCG helpers
   (helper_efscfsi etc.), one thin wrapper per conversion.  */
#define HELPER_SPE_SINGLE_CONV(name)                                          \
uint32_t helper_e##name (uint32_t val)                                        \
{                                                                             \
    return e##name(val);                                                      \
}
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
3012

    
3013
/* Vector (evfs*) forms: apply the scalar conversion independently to
   the high and low 32-bit halves of the 64-bit operand.  */
#define HELPER_SPE_VECTOR_CONV(name)                                          \
uint64_t helper_ev##name (uint64_t val)                                       \
{                                                                             \
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
            (uint64_t)e##name(val);                                           \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
3039

    
3040
/* Single-precision floating-point arithmetic */
3041
/* Single-precision add on raw 32-bit bit patterns, using the SPE
   float status.  */
static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

/* Single-precision subtract (op1 - op2).  */
static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

/* Single-precision multiply.  */
static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

/* Single-precision divide (op1 / op2).  */
static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->spe_status);
    return u1.l;
}
3076

    
3077
/* TCG helper wrappers for the scalar single-precision arithmetic.  */
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);
3090

    
3091
/* Vector (evfs*) arithmetic: apply the scalar operation to the high
   and low 32-bit halves independently.  */
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
            (uint64_t)e##name(op1, op2);                                      \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
3105

    
3106
/* Single-precision floating-point comparisons */
3107
/* Single-precision comparisons.  Each returns 4 (CR bit pattern) when
   the relation holds, 0 otherwise.  */
static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0;
}

/* "Greater than" implemented as !(op1 <= op2); NOTE(review): for
   unordered (NaN) operands float32_le is false, so this reports 4 —
   confirm against the SPE spec.  */
static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4;
}

static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->spe_status) ? 4 : 0;
}

/* efscmp* should differ from efststs* on special values but currently
   share the same implementation.  */
static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststlt(op1, op2);
}

static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststgt(op1, op2);
}

static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststeq(op1, op2);
}
3148

    
3149
/* TCG helper wrappers for the scalar comparisons.  NOTE(review): the
   scalar functions already return 4/0, so the extra << 2 here yields
   16/0 — verify against the CR-field handling in translate.c before
   relying on (or changing) this.  */
#define HELPER_SINGLE_SPE_CMP(name)                                           \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2) << 2;                                            \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
3166

    
3167
/* Merge the two per-half comparison results into a 4-bit CR pattern:
   bit3 = high result, bit2 = low result, bit1 = OR, bit0 = AND.  */
static always_inline uint32_t evcmp_merge (int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
3171

    
3172
/* Vector comparisons: compare high and low halves separately and merge
   the results into one CR pattern.  */
#define HELPER_VECTOR_SPE_CMP(name)                                           \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
3189

    
3190
/* Double-precision floating-point conversion */
3191
/* Convert signed 32-bit integer to double precision.  */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->spe_status);

    return u.ll;
}

/* Convert signed 64-bit integer to double precision.  */
uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->spe_status);

    return u.ll;
}

/* Convert unsigned 32-bit integer to double precision.  */
uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->spe_status);

    return u.ll;
}

/* Convert unsigned 64-bit integer to double precision.  */
uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->spe_status);

    return u.ll;
}

/* Convert double precision to signed 32-bit integer (current rounding
   mode).  SPE returns 0 for NaN inputs instead of the IEEE default.  */
uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int32(u.d, &env->spe_status);
}

/* Convert double precision to unsigned 32-bit integer; NaN yields 0.  */
uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32(u.d, &env->spe_status);
}

/* Convert double precision to signed 32-bit integer, rounding toward
   zero; NaN yields 0.  */
uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int32_round_to_zero(u.d, &env->spe_status);
}

/* Convert double precision to signed 64-bit integer, rounding toward
   zero; NaN yields 0.  */
uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int64_round_to_zero(u.d, &env->spe_status);
}

/* Convert double precision to unsigned 32-bit integer, rounding toward
   zero; NaN yields 0.  */
uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
}

/* Convert double precision to unsigned 64-bit integer, rounding toward
   zero; NaN yields 0.  */
uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
}
3298

    
3299
/* Convert signed 32-bit fractional (value / 2^32) to double
   precision.  */
uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->spe_status);
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_div(u.d, tmp, &env->spe_status);

    return u.ll;
}

/* Convert unsigned 32-bit fractional (value / 2^32) to double
   precision.  */
uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->spe_status);
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_div(u.d, tmp, &env->spe_status);

    return u.ll;
}

/* Convert double precision to signed 32-bit fractional: scale by 2^32
   then convert to integer.  NaN yields 0.  */
uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_mul(u.d, tmp, &env->spe_status);

    return float64_to_int32(u.d, &env->spe_status);
}

/* Convert double precision to unsigned 32-bit fractional; NaN yields
   0.  */
uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_mul(u.d, tmp, &env->spe_status);

    return float64_to_uint32(u.d, &env->spe_status);
}

/* Narrow a double-precision value to single precision.  */
uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->spe_status);

    return u2.l;
}

/* Widen a single-precision value to double precision.  */
uint64_t helper_efdcfs (uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->spe_status);

    return u2.ll;
}
3374

    
3375
/* Double precision fixed-point arithmetic */
3376
/* Double-precision add on raw 64-bit bit patterns.  */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

/* Double-precision subtract (op1 - op2).  */
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

/* Double-precision multiply.  */
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

/* Double-precision divide (op1 / op2).  */
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}
3411

    
3412
/* Double precision floating point helpers */
3413
/* Double-precision comparisons; each returns 4 when the relation
   holds, 0 otherwise.  */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0;
}

/* "Greater than" as !(op1 <= op2); NOTE(review): unordered (NaN)
   operands make float64_le false and so report 4 — confirm against the
   SPE spec.  */
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4;
}

uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0;
}

/* efdcmp* should differ from efdtst* on special values but currently
   share the same implementation.  */
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(op1, op2);
}

uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(op1, op2);
}

uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(op1, op2);
}
3454

    
3455
/*****************************************************************************/
3456
/* Softmmu support */
3457
#if !defined (CONFIG_USER_ONLY)
3458

    
3459
#define MMUSUFFIX _mmu
3460

    
3461
#define SHIFT 0
3462
#include "softmmu_template.h"
3463

    
3464
#define SHIFT 1
3465
#include "softmmu_template.h"
3466

    
3467
#define SHIFT 2
3468
#include "softmmu_template.h"
3469

    
3470
#define SHIFT 3
3471
#include "softmmu_template.h"
3472

    
3473
/* try to fill the TLB and return an exception if error. If retaddr is
3474
   NULL, it means that the function was called in C code (i.e. not
3475
   from generated code or from helper.c) */
3476
/* XXX: fix it to restore all registers */
3477
/* Handle a softmmu TLB miss: ask the PPC MMU to resolve the access and,
   on failure, restore the guest CPU state from the faulting translated
   code (when retaddr points into a TB) and raise the MMU exception.
   Does not return in the failure case (helper_raise_exception_err
   longjmps back to the CPU loop).  */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
3504

    
3505
/* Segment registers load and store */
3506
/* Read a segment register (mfsr/mfsrin).  */
target_ulong helper_load_sr (target_ulong sr_num)
{
    return env->sr[sr_num];
}

/* Write a segment register (mtsr/mtsrin); ppc_store_sr also handles
   any required TLB invalidation.  */
void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}
3515

    
3516
/* SLB management */
3517
#if defined(TARGET_PPC64)
3518
/* Read an SLB entry (slbmfee/slbmfev path).  */
target_ulong helper_load_slb (target_ulong slb_nr)
{
    return ppc_load_slb(env, slb_nr);
}

/* Write an SLB entry (slbmte).  */
void helper_store_slb (target_ulong slb_nr, target_ulong rs)
{
    ppc_store_slb(env, slb_nr, rs);
}

/* slbia: invalidate all SLB entries.  */
void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}

/* slbie: invalidate the SLB entry matching the effective address.  */
void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
3537

    
3538
#endif /* defined(TARGET_PPC64) */
3539

    
3540
/* TLB management */
3541
/* tlbia: invalidate the whole TLB.  */
void helper_tlbia (void)
{
    ppc_tlb_invalidate_all(env);
}

/* tlbie: invalidate TLB entries matching the effective address.  */
void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
3550

    
3551
/* Software driven TLBs management */
3552
/* PowerPC 602/603 software TLB load instructions helpers */
3553
/* PowerPC 602/603 software TLB reload (tlbld/tlbli): build a TLB entry
   from the RPA/ICMP-IMISS or RPA/DCMP-DMISS SPR set and store it in the
   way selected by SRR1 bit 17.  */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
                " PTE1 " ADDRX " way %d\n",
                __func__, new_EPN, EPN, CMP, RPN, way);
    }
#endif
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3578

    
3579
/* tlbld: load a data TLB entry.  */
void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}

/* tlbli: load an instruction TLB entry.  */
void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}
3588

    
3589
/* PowerPC 74xx software TLB load instructions helpers */
3590
/* PowerPC 74xx software TLB reload: like the 6xx path but the PTE and
   way come from PTELO/PTEHI/TLBMISS.  The 6xx-style soft-TLB store is
   reused for the 74xx family.  */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
                " PTE1 " ADDRX " way %d\n",
                __func__, new_EPN, EPN, CMP, RPN, way);
    }
#endif
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3610

    
3611
/* 74xx data TLB load.  */
void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}

/* 74xx instruction TLB load.  */
void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}
3620

    
3621
/* Decode a BookE TLB size field into a page size in bytes:
   1 KiB << (2 * size).  Widen the constant to target_ulong before
   shifting: booke_page_size_to_tlb lists sizes up to 2^40 on 64-bit
   targets, and 1024 << 30 would overflow a 32-bit int shift.  */
static always_inline target_ulong booke_tlb_to_page_size (int size)
{
    return (target_ulong)1024 << (2 * size);
}
3625

    
3626
/* Encode a page size in bytes as a BookE TLB size field (the inverse of
   booke_tlb_to_page_size).  Returns -1 for sizes that are not a valid
   power-of-4 multiple of 1 KiB.  */
static always_inline int booke_page_size_to_tlb (target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        /* Not a representable page size.  */
        size = -1;
        break;
    }

    return size;
}
3688

    
3689
/* Helpers for 4xx TLB management */
3690
/* Helpers for 4xx TLB management */
/* 4xx tlbre (high word): read back EPN, the SIZE field (bits 7-9) and
 * the valid bit (0x400) of a TLB entry.  As a side effect, loads the
 * entry's PID into SPR_40x_PID, mirroring the hardware behavior.
 */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;              /* 64-entry TLB */
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID)
        ret |= 0x400;           /* V bit */
    size = booke_page_size_to_tlb(tlb->size);
    if (size < 0 || size > 0x7)
        size = 1;               /* fall back to a 4 KiB encoding */
    ret |= size << 7;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}
3708

    
3709
/* 4xx tlbre (low word): read back RPN plus the execute (0x200) and
 * write (0x100) permission bits of a TLB entry.
 */
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= 0x3F;              /* 64-entry TLB */
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC)
        ret |= 0x200;           /* EX bit */
    if (tlb->prot & PAGE_WRITE)
        ret |= 0x100;           /* WR bit */
    return ret;
}
3723

    
3724
/* 4xx tlbwe (high word): write EPN, SIZE, valid and attribute bits of
 * a TLB entry.  Flushes the QEMU TLB over the entry's old range before
 * the update and over its new range afterwards, so stale translations
 * cannot survive a remap.
 */
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
    }
#endif
    entry &= 0x3F;              /* 64-entry TLB */
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
#if defined (DEBUG_SOFTWARE_TLB)
        if (loglevel != 0) {
            fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
        }
#endif
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
    /* SIZE field is bits 7-9; page size must be decoded before EPN,
     * which is aligned to the new size below. */
    tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);  /* align EPN to the page size */
    if (val & 0x40)                     /* V bit */
        tlb->prot |= PAGE_VALID;
    else
        tlb->prot &= ~PAGE_VALID;
    if (val & 0x20) {
        /* XXX: TO BE FIXED */
        cpu_abort(env, "Little-endian TLB entries are not supported by now\n");
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    tlb->attr = val & 0xFF;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    }
#endif
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
#if defined (DEBUG_SOFTWARE_TLB)
        if (loglevel != 0) {
            fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
        }
#endif
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
}
3793

    
3794
/* 4xx tlbwe (low word): write RPN and the permission bits of a TLB
 * entry.  Read access is always granted; execute (0x200) and write
 * (0x100) come from the value written.
 */
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
    }
#endif
    entry &= 0x3F;              /* 64-entry TLB */
    tlb = &env->tlb[entry].tlbe;
    tlb->RPN = val & 0xFFFFFC00;    /* RPN is 1 KiB aligned */
    tlb->prot = PAGE_READ;
    if (val & 0x200)
        tlb->prot |= PAGE_EXEC;
    if (val & 0x100)
        tlb->prot |= PAGE_WRITE;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    }
#endif
}
3823

    
3824
/* 4xx tlbsx: search the TLB for a translation of 'address' under the
 * current PID; result semantics are those of ppcemb_tlb_search().
 */
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
3828

    
3829
/* PowerPC 440 TLB management */
3830
/* PowerPC 440 TLB management */
/* 440 tlbwe: write one of the three words of a TLB entry.
 *   word 0: EPN, SIZE, TS attribute bit and valid bit (PID is latched
 *           from MMUCR); flushes the QEMU TLB when the mapping changed
 *           while valid.
 *   word 1: RPN; flushes when it changes on a valid entry.
 *   word 2: storage attributes and the user/supervisor permission bits
 *           (user bits are kept shifted left by 4 in tlb->prot).
 */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s word %d entry %d value " ADDRX "\n",
                __func__, word, (int)entry, value);
    }
#endif
    do_flush_tlbs = 0;
    entry &= 0x3F;              /* 64-entry TLB */
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        /* Flush if a valid entry's mapping actually changes */
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;  /* TS bit kept in attr bit 0 */
        if (value & 0x200) {            /* V bit */
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        /* PID comes from MMUCR, not from the written value */
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;       /* RPN plus ERPN low bits */
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        /* Rebuild permissions, preserving only the valid bit */
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;    /* UR */
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;   /* UW */
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;    /* UX */
        if (value & 0x8)
            tlb->prot |= PAGE_READ;         /* SR */
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;        /* SW */
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;         /* SX */
        break;
    }
}
3895

    
3896
/* 440 tlbre: read back one of the three words of a TLB entry, the
 * inverse of helper_440_tlbwe().  Reading word 0 also loads the
 * entry's PID into the low byte of MMUCR, as the hardware does.
 */
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;              /* 64-entry TLB */
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;           /* fall back to a 4 KiB encoding */
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;       /* TS bit */
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;       /* V bit */
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;         /* UR */
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;         /* UW */
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;         /* UX */
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;         /* SR */
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;        /* SW */
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;        /* SX */
        break;
    }
    return ret;
}
3941

    
3942
/* 440 tlbsx: search the TLB for a translation of 'address' under the
 * PID currently held in the low byte of MMUCR.
 */
target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
3946

    
3947
#endif /* !CONFIG_USER_ONLY */