Statistics
| Branch: | Revision:

root / target-ppc / op_helper.c @ c3d420ea

History | View | Annotate | Download (124.1 kB)

1
/*
2
 *  PowerPC emulation helpers for qemu.
3
 *
4
 *  Copyright (c) 2003-2007 Jocelyn Mayer
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <string.h>
20
#include "exec.h"
21
#include "host-utils.h"
22
#include "helper.h"
23

    
24
#include "helper_regs.h"
25

    
26
//#define DEBUG_OP
27
//#define DEBUG_EXCEPTIONS
28
//#define DEBUG_SOFTWARE_TLB
29

    
30
#ifdef DEBUG_SOFTWARE_TLB
31
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
32
#else
33
#  define LOG_SWTLB(...) do { } while (0)
34
#endif
35

    
36

    
37
/*****************************************************************************/
38
/* Exceptions processing helpers */
39

    
40
/* Raise a PowerPC exception with an additional error code.
 * Stores the exception number and error code into the CPU state and
 * longjmps back to the main execution loop via cpu_loop_exit() --
 * this function never returns to the caller. */
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

/* Convenience wrapper: raise an exception with a zero error code. */
void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
54

    
55
/*****************************************************************************/
56
/* SPR accesses */
57
/* Debug helper: log an SPR read (SPR number in decimal and hex, plus the
 * value currently held in env->spr[]). */
void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

/* Debug helper: log an SPR write.  Note this logs the value already in
 * env->spr[sprn]; the translator stores the new value around this call. */
void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}
68

    
69
/* Read the low 32 bits of the time base (TBL). */
target_ulong helper_load_tbl (void)
{
    return (target_ulong)cpu_ppc_load_tbl(env);
}

/* Read the high 32 bits of the time base (TBU). */
target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

/* Read the low 32 bits of the alternate time base (ATBL). */
target_ulong helper_load_atbl (void)
{
    return (target_ulong)cpu_ppc_load_atbl(env);
}

/* Read the high 32 bits of the alternate time base (ATBU). */
target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

/* Read the PowerPC 601 real-time clock, low word (RTCL). */
target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

/* Read the PowerPC 601 real-time clock, high word (RTCU). */
target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
98

    
99
#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
/* Write the Address Space Register (64-bit MMU). */
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

/* Write SDR1 (hashed page table base). */
void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

/* Write the low word of the time base. */
void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

/* Write the high word of the time base. */
void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

/* Write the low word of the alternate time base. */
void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

/* Write the high word of the alternate time base. */
void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

/* Write the PowerPC 601 real-time clock, low word. */
void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

/* Write the PowerPC 601 real-time clock, high word. */
void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

/* Read the decrementer. */
target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

/* Write the decrementer. */
void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

/* Write HID0 on the PowerPC 601.  HID0 bit 3 (0x8) selects little-endian
 * mode; when that bit changes, the MSR_LE position of the non-MSR hflags
 * is updated so translated code picks up the new endianness. */
void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        /* Place HID0 bit 3 at the MSR_LE position of hflags_nmsr */
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
                 val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

/* Write one of the PowerPC 403 PB (protection bound) registers.  Any
 * change conservatively flushes the whole TLB, as the comment notes. */
void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

/* Read the 40x programmable interval timer. */
target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

/* Write the 40x programmable interval timer. */
void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

/* Write the 40x debug control register 0. */
void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

/* Write the 40x storage little-endian register. */
void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

/* Write the BookE timer control register. */
void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

/* Write the BookE timer status register. */
void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

/* Classic BAT (block address translation) register writes. */
void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

/* PowerPC 601 unified BATs (the 601 has a single set of BATs). */
void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif
238

    
239
/*****************************************************************************/
240
/* Memory load and stores */
241

    
242
/* Advance an effective address by arg, truncating the result to 32 bits
 * when a 64-bit CPU is running in 32-bit mode (MSR[SF] clear). */
static inline target_ulong addr_add(target_ulong addr, target_long arg)
{
    target_ulong ea = addr + arg;

#if defined(TARGET_PPC64)
    if (!msr_sf) {
        ea = (uint32_t)ea;
    }
#endif
    return ea;
}
251

    
252
/* lmw: load multiple words into GPRs reg..31 from consecutive words at
 * addr, byte-swapping each word when MSR[LE] is set. */
void helper_lmw (target_ulong addr, uint32_t reg)
{
    uint32_t r;

    for (r = reg; r < 32; r++) {
        uint32_t w = ldl(addr);

        env->gpr[r] = msr_le ? bswap32(w) : w;
        addr = addr_add(addr, 4);
    }
}
262

    
263
/* stmw: store GPRs reg..31 as consecutive words at addr, byte-swapping
 * each word when MSR[LE] is set. */
void helper_stmw (target_ulong addr, uint32_t reg)
{
    uint32_t r;

    for (r = reg; r < 32; r++) {
        uint32_t w = (uint32_t)env->gpr[r];

        stl(addr, msr_le ? bswap32(w) : w);
        addr = addr_add(addr, 4);
    }
}
273

    
274
/* lsw: load string word.  Transfers nb bytes into GPRs starting at reg
 * (4 bytes per register, wrapping r31 -> r0); a trailing 1..3 bytes are
 * packed into the high-order end of the last register, low bytes zeroed. */
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    /* Whole words first */
    while (nb > 3) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    /* Leftover bytes fill the last register from the top down */
    if (unlikely(nb > 0)) {
        int shift = 24;

        env->gpr[reg] = 0;
        while (nb > 0) {
            env->gpr[reg] |= ldub(addr) << shift;
            addr = addr_add(addr, 1);
            shift -= 8;
            nb--;
        }
    }
}
290
/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * In an other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
/* lswx: load string word indexed.  Loads XER[BC] bytes into GPRs starting
 * at reg via helper_lsw(); raises an invalid-operation program exception
 * when the destination range would overwrite ra (if nonzero) or rb.
 * A zero byte count is a no-op.
 * NOTE(review): the overlap test does not account for the register range
 * wrapping past r31 back to r0 -- confirm whether that case matters. */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
308

    
309
/* stsw: store string word.  Writes nb bytes from GPRs starting at reg
 * (4 bytes per register, wrapping r31 -> r0); a trailing 1..3 bytes come
 * from the high-order end of the last register. */
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    /* Whole words first */
    while (nb > 3) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
        nb -= 4;
    }
    /* Leftover bytes are taken from the top of the last register */
    if (unlikely(nb > 0)) {
        int shift = 24;

        while (nb > 0) {
            stb(addr, (env->gpr[reg] >> shift) & 0xFF);
            addr = addr_add(addr, 1);
            shift -= 8;
            nb--;
        }
    }
}
324

    
325
/* Zero one data cache line: addr is rounded down to a line boundary,
 * dcache_line_size bytes are cleared, and any lwarx/stwcx reservation on
 * that line is cancelled. */
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    addr &= ~(dcache_line_size - 1);
    int i;
    for (i = 0 ; i < dcache_line_size ; i += 4) {
        stl(addr + i , 0);
    }
    if (env->reserve_addr == addr)
        env->reserve_addr = (target_ulong)-1ULL;
}

/* dcbz using the CPU's configured data cache line size. */
void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

/* dcbz on the PowerPC 970: when HID5 bits 8:7 equal 1, dcbz operates on a
 * 32-byte line; otherwise the configured line size is used. */
void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}

/* icbi: invalidate any translated code covering one cache line.
 * NOTE(review): the address is aligned using dcache_line_size while the
 * invalidation length uses icache_line_size -- confirm both are intended
 * to be the same size here. */
void helper_icbi(target_ulong addr)
{
    uint32_t tmp;

    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    tmp = ldl(addr);   /* value intentionally unused: load performed for MMU side effects */
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}
362

    
363
// XXX: to be tested
/* lscbx (601): load string and compare byte indexed.  Loads up to XER[BC]
 * bytes into successive GPRs (4 bytes per register, packed high byte
 * first, wrapping r31 -> r0), stopping when a byte equals XER[CMP].
 * Registers rb and ra (unless ra == 0) are skipped, never overwritten.
 * Returns the loop index at termination, used by the caller to update
 * XER[BC].  NOTE(review): when the compare byte matches, that byte is
 * stored but the returned count excludes it -- confirm against the 601
 * lscbx specification. */
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;
    d = 24;  /* bit offset of the next byte within the current GPR */
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            /* Register full: move on to the next one, wrapping at r31 */
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
387

    
388
/*****************************************************************************/
389
/* Fixed point operations helpers */
390
#if defined(TARGET_PPC64)
391

    
392
/* multiply high word */
393
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
394
{
395
    uint64_t tl, th;
396

    
397
    muls64(&tl, &th, arg1, arg2);
398
    return th;
399
}
400

    
401
/* multiply high word unsigned */
402
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
403
{
404
    uint64_t tl, th;
405

    
406
    mulu64(&tl, &th, arg1, arg2);
407
    return th;
408
}
409

    
410
uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
411
{
412
    int64_t th;
413
    uint64_t tl;
414

    
415
    muls64(&tl, (uint64_t *)&th, arg1, arg2);
416
    /* If th != 0 && th != -1, then we had an overflow */
417
    if (likely((uint64_t)(th + 1) <= 1)) {
418
        env->xer &= ~(1 << XER_OV);
419
    } else {
420
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
421
    }
422
    return (int64_t)tl;
423
}
424
#endif
425

    
426
/* cntlzw: count leading zero bits of the 32-bit argument. */
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
/* cntlzd: count leading zero bits of the 64-bit argument. */
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif
437

    
438
/* shift right arithmetic helper */
439
target_ulong helper_sraw (target_ulong value, target_ulong shift)
440
{
441
    int32_t ret;
442

    
443
    if (likely(!(shift & 0x20))) {
444
        if (likely((uint32_t)shift != 0)) {
445
            shift &= 0x1f;
446
            ret = (int32_t)value >> shift;
447
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
448
                env->xer &= ~(1 << XER_CA);
449
            } else {
450
                env->xer |= (1 << XER_CA);
451
            }
452
        } else {
453
            ret = (int32_t)value;
454
            env->xer &= ~(1 << XER_CA);
455
        }
456
    } else {
457
        ret = (int32_t)value >> 31;
458
        if (ret) {
459
            env->xer |= (1 << XER_CA);
460
        } else {
461
            env->xer &= ~(1 << XER_CA);
462
        }
463
    }
464
    return (target_long)ret;
465
}
466

    
467
#if defined(TARGET_PPC64)
/* srad: 64-bit arithmetic right shift.  XER[CA] is set when the result is
 * negative and one-bits were shifted out; a shift amount with bit 0x40
 * set (>= 64) yields a doubleword of sign bits. */
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* BUGFIX: the shifted-out-bits mask must use a 64-bit 1.
             * The old `1 << shift` was an int shift -- undefined behavior
             * for shift >= 31 and a wrong (truncated) mask for large
             * shifts, so CA could be miscomputed. */
            if (likely(ret >= 0 ||
                       (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        /* Shift >= 64: replicate the sign bit; CA set iff value negative */
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif
496

    
497
/* popcntb: SWAR per-byte population count -- after the three add/mask
 * steps, each byte of the result holds the number of set bits in the
 * corresponding byte of the input. */
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
/* 64-bit variant of the per-byte population count. */
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif
514

    
515
/*****************************************************************************/
516
/* Floating point operations helpers */
517
/* Convert a raw 32-bit single-precision pattern to a raw 64-bit double,
 * via softfloat so FP status flags are accumulated in env->fp_status. */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

/* Convert a raw 64-bit double-precision pattern to a raw 32-bit single. */
uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

/* True when the double's biased exponent field is zero, i.e. the value is
 * denormalized (or zero -- callers check for zero separately first). */
static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}
543

    
544
/* Classify a double into the 5-bit FPRF encoding (C bit + FPCC):
 *   0x11 quiet NaN        0x09 / 0x05 -/+ infinity
 *   0x12 / 0x02 -/+ zero  0x18 / 0x14 -/+ denormal
 *   0x08 / 0x04 -/+ normal
 * When set_fprf is non-zero, FPSCR[FPRF] is rewritten with the result.
 * Only the low 4 bits (FPCC) are returned, for the caller's CR1 update. */
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            /* Fold the sign into the class bits */
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
595

    
596
/* Floating-point invalid operations exception */
/* Record invalid-operation condition `op` (a POWERPC_EXCP_FP_VX* code) in
 * FPSCR, set the VX/FX summary bits, and -- when FPSCR[VE] is set and the
 * MSR FP-exception mode bits enable it -- raise the program exception.
 * Returns the default quiet-NaN result to store into the target FPR when
 * the exception is disabled for arithmetic/conversion ops, else 0. */
static inline uint64_t fload_invalid_op_excp(int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is differed */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
677

    
678
/* Record a zero-divide condition: set FPSCR[ZX,FX], and raise the program
 * exception when FPSCR[ZE] and the MSR FP-exception mode allow it. */
static inline void float_zero_divide_excp(void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

/* Record an overflow: set FPSCR[OX,FX].  When OE is set the exception is
 * deferred (index/error code recorded, raised later by
 * helper_float_check_status); otherwise XX/FI are set as inexact. */
static inline void float_overflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

/* Record an underflow: set FPSCR[UX,FX]; exception deferred when UE set. */
static inline void float_underflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

/* Record an inexact result: set FPSCR[XX,FX]; exception deferred when XE
 * is set. */
static inline void float_inexact_excp(void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
740

    
741
static inline void fpscr_set_rounding_mode(void)
742
{
743
    int rnd_type;
744

    
745
    /* Set rounding mode */
746
    switch (fpscr_rn) {
747
    case 0:
748
        /* Best approximation (round to nearest) */
749
        rnd_type = float_round_nearest_even;
750
        break;
751
    case 1:
752
        /* Smaller magnitude (round toward zero) */
753
        rnd_type = float_round_to_zero;
754
        break;
755
    case 2:
756
        /* Round toward +infinite */
757
        rnd_type = float_round_up;
758
        break;
759
    default:
760
    case 3:
761
        /* Round toward -infinite */
762
        rnd_type = float_round_down;
763
        break;
764
    }
765
    set_float_rounding_mode(rnd_type, &env->fp_status);
766
}
767

    
768
/* Clear a single FPSCR bit.  When a rounding-control bit (RN or RN1)
 * transitions from 1 to 0, the softfloat rounding mode is refreshed. */
void helper_fpscr_clrbit (uint32_t bit)
{
    int was_set = (env->fpscr >> bit) & 1;

    env->fpscr &= ~(1 << bit);
    if (was_set && (bit == FPSCR_RN1 || bit == FPSCR_RN)) {
        fpscr_set_rounding_mode();
    }
}
785

    
786
/* Set a single FPSCR bit (mtfsb1 and friends).  On a 0 -> 1 transition
 * this updates the FX/VX/FEX summary bits and, when the corresponding
 * enable bit is set, records a deferred program exception.  Setting a
 * rounding-control bit (RN/RN1) refreshes the softfloat rounding mode. */
void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            /* BUGFIX: missing break -- the original fell through into the
             * FPSCR_OX case when VE was disabled, wrongly treating a VX
             * set as a pending overflow. */
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            /* Any specific invalid-operation bit also sets the summaries */
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception, so the
             * exception is only recorded here (deferred). */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
900

    
901
/* Write FPSCR (mtfsf): copy the nibbles of `arg` selected by the 8-bit
 * nibble mask `mask` into FPSCR.  FEX and VX (bits 0x60000000) are never
 * written directly -- they are recomputed afterwards from the sticky
 * exception bits and their enables; a pending enabled exception records a
 * deferred program exception. */
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    /* FEX and VX come from the recomputation below, not from `arg` */
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    }
    else
        env->fpscr &= ~(1 << FPSCR_FEX);
    fpscr_set_rounding_mode();
}
934

    
935
/* Inspect the state left by the last FP operation.  A deferred program
 * exception (recorded while the target FPR still had to be written) is
 * raised now if the MSR FP-exception mode enables it.  Otherwise, with
 * softfloat, the accumulated status flags are translated to FPSCR
 * updates -- only the highest-priority flag is handled
 * (divide-by-zero > overflow > underflow > inexact). */
void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}
964

    
965
#ifdef CONFIG_SOFTFLOAT
966
void helper_reset_fpstatus (void)
967
{
968
    set_float_exception_flags(0, &env->fp_status);
969
}
970
#endif
971

    
972
/* fadd - fadd. */
973
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
974
{
975
    CPU_DoubleU farg1, farg2;
976

    
977
    farg1.ll = arg1;
978
    farg2.ll = arg2;
979
#if USE_PRECISE_EMULATION
980
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
981
                 float64_is_signaling_nan(farg2.d))) {
982
        /* sNaN addition */
983
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
984
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
985
                      float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
986
        /* Magnitude subtraction of infinities */
987
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
988
    } else {
989
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
990
    }
991
#else
992
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
993
#endif
994
    return farg1.ll;
995
}
996

    
997
/* fsub - fsub. */
/* Floating-point subtract.  With precise emulation, raises VXSNAN for a
 * signaling-NaN operand and VXISI for inf - inf (same-signed
 * infinities) before performing the subtract. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
{
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN subtraction */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                      float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }
}
#else
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
1023

    
1024
/* fmul - fmul. */
/* Floating-point multiply.  With precise emulation, raises VXSNAN for a
 * signaling-NaN operand and VXIMZ for 0 * inf before multiplying. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
1048

    
1049
/* fdiv - fdiv. */
/* Floating-point divide.  With precise emulation, raises VXSNAN for a
 * signaling-NaN operand, VXIDI for inf / inf and VXZDZ for 0 / 0
 * before dividing. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
1075

    
1076
/* fabs */
/* Floating-point absolute value: clear the IEEE754 double sign bit.
 * Bit-identical to float64_abs() on the raw pattern, including for
 * NaNs and infinities (no exceptions are raised). */
uint64_t helper_fabs (uint64_t arg)
{
    return arg & ~0x8000000000000000ULL;
}
1085

    
1086
/* fnabs */
/* Floating-point negative absolute value: force the sign bit on.
 * Equivalent to float64_chs(float64_abs(x)) on the raw bit pattern;
 * no exceptions are raised. */
uint64_t helper_fnabs (uint64_t arg)
{
    return arg | 0x8000000000000000ULL;
}
1096

    
1097
/* fneg */
/* Floating-point negate: flip the sign bit.  Equivalent to
 * float64_chs() on the raw bit pattern; no exceptions are raised. */
uint64_t helper_fneg (uint64_t arg)
{
    return arg ^ 0x8000000000000000ULL;
}
1106

    
1107
/* fctiw - fctiw. */
/* Convert double to 32-bit signed int using the current rounding mode.
 * sNaN raises VXSNAN|VXCVI; qNaN/infinity raises VXCVI. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}
1130

    
1131
/* fctiwz - fctiwz. */
/* Convert double to 32-bit signed int, rounding toward zero.
 * sNaN raises VXSNAN|VXCVI; qNaN/infinity raises VXCVI. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}
1154

    
1155
#if defined(TARGET_PPC64)
1156
/* fcfid - fcfid. */
/* Convert a 64-bit signed integer to double. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}
1163

    
1164
/* fctid - fctid. */
/* Convert double to 64-bit signed int using the current rounding mode.
 * sNaN raises VXSNAN|VXCVI; qNaN/infinity raises VXCVI. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1181

    
1182
/* fctidz - fctidz. */
/* Convert double to 64-bit signed int, rounding toward zero.
 * sNaN raises VXSNAN|VXCVI; qNaN/infinity raises VXCVI. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1199

    
1200
#endif
1201

    
1202
/* Common implementation of the frin/friz/frip/frim round-to-integer
 * helpers: round under the given temporary rounding mode, then restore
 * the rounding mode from FPSCR.  sNaN raises VXSNAN|VXCVI;
 * qNaN/infinity raises VXCVI. */
static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}
1221

    
1222
/* frin: round to integer, ties to even. */
uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}
1226

    
1227
/* friz: round to integer toward zero. */
uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}
1231

    
1232
/* frip: round to integer toward +infinity. */
uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}
1236

    
1237
/* frim: round to integer toward -infinity. */
uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
1241

    
1242
/* fmadd - fmadd. */
/* Fused multiply-add: (arg1 * arg2) + arg3.  With precise emulation
 * and FLOAT128 support the intermediate product is kept at 128-bit
 * precision, as the PowerPC architecture requires (single rounding).
 * Raises VXSNAN on sNaN inputs, VXIMZ for 0 * inf, and VXISI when the
 * product and addend are infinities of opposite sign. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}
1288

    
1289
/* fmsub - fmsub. */
/* Fused multiply-subtract: (arg1 * arg2) - arg3.  Same precision and
 * exception handling as helper_fmadd, except VXISI fires when the
 * product and subtrahend are infinities of the SAME sign. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}
1335

    
1336
/* fnmadd - fnmadd. */
/* Negated fused multiply-add: -((arg1 * arg2) + arg3).  The final
 * sign flip is skipped for NaN results (NaNs are not negated).
 * Note: unlike fmadd, the sNaN / 0*inf checks here run even without
 * USE_PRECISE_EMULATION. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
1385

    
1386
/* fnmsub - fnmsub. */
/* Negated fused multiply-subtract: -((arg1 * arg2) - arg3).  The final
 * sign flip is skipped for NaN results.  Like fnmadd, the sNaN and
 * 0*inf checks run regardless of USE_PRECISE_EMULATION. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
1435

    
1436
/* frsp - frsp. */
/* Round a double to single precision (and back to double format).
 * With precise emulation, an sNaN input raises VXSNAN instead. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
       farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
       f32 = float64_to_float32(farg.d, &env->fp_status);
       farg.d = float32_to_float64(f32, &env->fp_status);
    }
#else
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);
#endif
    return farg.ll;
}
1457

    
1458
/* fsqrt - fsqrt. */
/* Floating-point square root.  sNaN raises VXSNAN; a negative
 * nonzero operand raises VXSQRT. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1475

    
1476
/* fre - fre. */
1477
uint64_t helper_fre (uint64_t arg)
1478
{
1479
    CPU_DoubleU farg;
1480
    farg.ll = arg;
1481

    
1482
    if (unlikely(float64_is_signaling_nan(farg.d))) {
1483
        /* sNaN reciprocal */
1484
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1485
    } else {
1486
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1487
    }
1488
    return farg.d;
1489
}
1490

    
1491
/* fres - fres. */
/* Single-precision reciprocal estimate: 1.0/x computed in double,
 * then rounded through single precision.  sNaN raises VXSNAN. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
1508

    
1509
/* frsqrte  - frsqrte. */
/* Reciprocal square root estimate: 1.0/sqrt(x) in double, rounded
 * through single precision.  sNaN raises VXSNAN; a negative nonzero
 * operand raises VXSQRT. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
1530

    
1531
/* fsel - fsel. */
/* Floating select: return arg2 if arg1 >= 0.0 (and is not a NaN),
 * otherwise arg3.  No FP exceptions are raised.
 * NOTE(review): float64_is_nan() in this softfloat version appears to
 * detect quiet NaNs only; confirm sNaN operands in arg1 are handled
 * as intended. */
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
        return arg2;
    else
        return arg3;
}
1543

    
1544
/* fcmpu: unordered FP compare.  Sets the condition field crfD and the
 * FPSCR FPRF bits to one of 0x8 (LT), 0x4 (GT), 0x2 (EQ) or
 * 0x1 (unordered / NaN).  An sNaN operand additionally raises VXSNAN. */
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}
1572

    
1573
/* fcmpo: ordered FP compare.  Like fcmpu, but an unordered result also
 * raises VXVC (plus VXSNAN if the NaN was signaling), per the
 * architecture's ordered-compare semantics. */
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
1606

    
1607
#if !defined (CONFIG_USER_ONLY)
1608
/* mtmsr: store to the MSR.  hreg_store_msr() returns a nonzero
 * exception number when the write requires one (e.g. to stop
 * translation); in that case exit the TB and raise it. */
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}
1616

    
1617
/* Common return-from-interrupt: restore NIP and MSR (masked by msrm).
 * On 64-bit targets, when MSR_SF is clear the restored values are
 * truncated to 32 bits; keep_msrh preserves the current high MSR
 * bits in that case.  The low two NIP bits are always cleared. */
static inline void do_rfi(target_ulong nip, target_ulong msr,
                          target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
1645

    
1646
/* rfi: return from interrupt using SRR0/SRR1; keeps high MSR bits. */
void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 1);
}
1651

    
1652
#if defined(TARGET_PPC64)
1653
/* rfid: 64-bit return from interrupt using SRR0/SRR1. */
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 0);
}
1658

    
1659
/* hrfid: hypervisor return from interrupt using HSRR0/HSRR1. */
void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0x783F0000), 0);
}
1664
#endif
1665
#endif
1666

    
1667
/* tw: conditional trap on 32-bit comparison.  Bits of 'flags' select
 * the conditions: 0x10 signed <, 0x08 signed >, 0x04 ==,
 * 0x02 unsigned <, 0x01 unsigned >.  Raise a trap program exception
 * if any selected condition holds. */
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
1677

    
1678
#if defined(TARGET_PPC64)
1679
/* td: 64-bit conditional trap; same flag encoding as helper_tw. */
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
}
1688
#endif
1689

    
1690
/*****************************************************************************/
1691
/* PowerPC 601 specific instructions (POWER bridge) */
1692

    
1693
/* POWER clcs: return the cache geometry parameter selected by arg.
 * 0x0C: I-cache line size, 0x0D: D-cache line size, 0x0E: minimum of
 * the two, 0x0F: maximum; any other selector yields 0. */
target_ulong helper_clcs (uint32_t arg)
{
    target_ulong icl = env->icache_line_size;
    target_ulong dcl = env->dcache_line_size;

    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return icl;
    case 0x0DUL:
        /* Data cache line size */
        return dcl;
    case 0x0EUL:
        /* Minimum cache line size */
        return icl < dcl ? icl : dcl;
    case 0x0FUL:
        /* Maximum cache line size */
        return icl > dcl ? icl : dcl;
    default:
        /* Undefined */
        return 0;
    }
}
1720

    
1721
/* POWER div: divide the 64-bit value (arg1:MQ) by arg2; quotient is
 * returned, remainder goes to MQ.  Overflow or divide-by-zero yields
 * INT32_MIN with MQ cleared.
 * NOTE(review): 'tmp % arg2' and 'tmp / (int32_t)arg2' mix unsigned
 * 64-bit and signed 32-bit operands, so the C promotions make these
 * unsigned operations — confirm this matches the intended POWER
 * semantics for negative operands. */
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return  tmp / (int32_t)arg2;
    }
}
1734

    
1735
/* POWER divo: like helper_div but also updates XER[OV]/XER[SO] when
 * the quotient overflows 32 bits or the divide is invalid. */
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        /* Overflow if the quotient does not fit in 32 bits */
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}
1755

    
1756
/* POWER divs: 32-bit signed divide; remainder is written to MQ.
 * Divide-by-zero and INT32_MIN / -1 yield INT32_MIN with MQ = 0. */
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    int32_t dividend = (int32_t)arg1;
    int32_t divisor = (int32_t)arg2;

    if (divisor == 0 ||
        (dividend == INT32_MIN && divisor == (int32_t)-1)) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->spr[SPR_MQ] = dividend % divisor;
    return dividend / divisor;
}
1767

    
1768
/* POWER divso: like helper_divs but also maintains XER[OV]/XER[SO]:
 * set both on an invalid divide, otherwise clear OV. */
target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    int32_t dividend = (int32_t)arg1;
    int32_t divisor = (int32_t)arg2;

    if (divisor == 0 ||
        (dividend == INT32_MIN && divisor == (int32_t)-1)) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    }
    env->xer &= ~(1 << XER_OV);
    env->spr[SPR_MQ] = dividend % divisor;
    return dividend / divisor;
}
1781

    
1782
#if !defined (CONFIG_USER_ONLY)
1783
/* POWER rac: translate an effective address to a real address,
 * temporarily disabling BAT translation (nb_BATs forced to 0).
 * Returns 0 when translation fails. */
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}
1800

    
1801
/* POWER rfsvc: return from service call — NIP from LR, MSR from CTR
 * (masked to the low 16 bits). */
void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
1805
#endif
1806

    
1807
/*****************************************************************************/
1808
/* 602 specific instructions */
1809
/* mfrom is the most crazy instruction ever seen, imho ! */
1810
/* Real implementation uses a ROM table. Do the same */
1811
/* Extremly decomposed:
1812
 *                      -arg / 256
1813
 * return 256 * log10(10           + 1.0) + 0.5
1814
 */
1815
#if !defined (CONFIG_USER_ONLY)
1816
/* 602 mfrom: table lookup of 256 * log10(10^(-arg/256) + 1.0) + 0.5,
 * mirroring the hardware ROM; out-of-range arguments return 0. */
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
1825
#endif
1826

    
1827
/*****************************************************************************/
1828
/* Embedded PowerPC specific helpers */
1829

    
1830
/* XXX: to be improved to check access rights when in user-mode */
1831
/* Read a Device Control Register.  Raises a program exception when no
 * DCR environment exists or the DCR read fails; otherwise returns the
 * value read (0 on the exception paths, though the exception unwinds). */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    uint32_t val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}
1846

    
1847
/* Write a Device Control Register.  Raises a program exception when no
 * DCR environment exists or the DCR write fails. */
void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
1859

    
1860
#if !defined(CONFIG_USER_ONLY)
1861
/* 40x rfci: return from critical interrupt using SRR2/SRR3. */
void helper_40x_rfci (void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}
1866

    
1867
void helper_rfci (void)
1868
{
1869
    do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1870
           ~((target_ulong)0x3FFF0000), 0);
1871
}
1872

    
1873
void helper_rfdi (void)
1874
{
1875
    do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1876
           ~((target_ulong)0x3FFF0000), 0);
1877
}
1878

    
1879
void helper_rfmci (void)
1880
{
1881
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1882
           ~((target_ulong)0x3FFF0000), 0);
1883
}
1884
#endif
1885

    
1886
/* 440 specific */
1887
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
1888
{
1889
    target_ulong mask;
1890
    int i;
1891

    
1892
    i = 1;
1893
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1894
        if ((high & mask) == 0) {
1895
            if (update_Rc) {
1896
                env->crf[0] = 0x4;
1897
            }
1898
            goto done;
1899
        }
1900
        i++;
1901
    }
1902
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1903
        if ((low & mask) == 0) {
1904
            if (update_Rc) {
1905
                env->crf[0] = 0x8;
1906
            }
1907
            goto done;
1908
        }
1909
        i++;
1910
    }
1911
    if (update_Rc) {
1912
        env->crf[0] = 0x2;
1913
    }
1914
 done:
1915
    env->xer = (env->xer & ~0x7F) | i;
1916
    if (update_Rc) {
1917
        env->crf[0] |= xer_so;
1918
    }
1919
    return i;
1920
}
1921

    
1922
/*****************************************************************************/
1923
/* Altivec extension helpers */
1924
/* HI_IDX/LO_IDX pick the more/less significant member of an adjacent
 * element pair, compensating for host byte order. */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/* Iterate over the elements of vector r in PowerPC (big-endian)
 * element order regardless of host byte order: forward on BE hosts,
 * backward on LE hosts. */
#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif
1939

    
1940
/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                \
    if (float32_is_nan(x) || float32_is_signaling_nan(x)) {     \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

/* Guard one, two or three float32 operands: if any operand is a NaN,
 * the quietened NaN is stored into RESULT and the caller's block
 * (which chains onto the trailing "else") is skipped. */
#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
1956

    
1957
/* Saturating arithmetic helpers.  */
/* SATCVT defines cvt<from><to>(): narrow a from_type value to
 * to_type, clamping to [min, max] and setting *sat when clamping
 * occurred.  use_min/use_max disable the respective bound check for
 * conversions where it cannot trigger. */
#define SATCVT(from, to, from_type, to_type, min, max, use_min, use_max) \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (use_min && x < min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (use_max && x > max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
/* Signed -> narrower signed saturations. */
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX, 1, 1)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX, 1, 1)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX, 1, 1)
1976

    
1977
/* Work around gcc problems with the macro version */
/* Saturate an unsigned 16-bit value to an unsigned 8-bit result,
 * setting *sat when the value had to be clamped to UINT8_MAX. */
static inline uint8_t cvtuhub(uint16_t x, int *sat)
{
    if (x <= UINT8_MAX) {
        return x;
    }
    *sat = 1;
    return UINT8_MAX;
}
1990
/* Unsigned narrowing and signed->unsigned saturations (cvtuhub above
 * replaces the commented-out macro instantiation). */
//SATCVT(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX, 0, 1)
SATCVT(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX, 0, 1)
SATCVT(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX, 0, 1)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX, 1, 1)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX, 1, 1)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX, 1, 1)
#undef SATCVT
1997

    
1998
/* LVE defines the lvebx/lvehx/lvewx element-load helpers: load one
 * element from memory into the vector slot selected by the low bits
 * of the effective address, byte-swapping when MSR[LE] is set.
 * "I" is the identity swap used for single bytes. */
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if(msr_le) {                                            \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
#define I(x) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE
2017

    
2018
/* lvsl: fill the result with the ascending byte sequence
 * sh, sh+1, ..., sh+15 (in PowerPC element order), the permute
 * control vector used for unaligned loads. */
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int next = sh & 0xf;

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = next;
        next++;
    }
}
2026

    
2027
/* lvsr: fill the result with the ascending byte sequence starting at
 * 16 - sh (in PowerPC element order), the permute control vector used
 * for unaligned stores / right shifts. */
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i;
    int next = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = next;
        next++;
    }
}
2035

    
2036
#define STVE(name, access, swap, element)                       \
2037
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
2038
    {                                                           \
2039
        size_t n_elems = ARRAY_SIZE(r->element);                \
2040
        int adjust = HI_IDX*(n_elems-1);                        \
2041
        int sh = sizeof(r->element[0]) >> 1;                    \
2042
        int index = (addr & 0xf) >> sh;                         \
2043
        if(msr_le) {                                            \
2044
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2045
        } else {                                                        \
2046
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2047
        }                                                               \
2048
    }
2049
#define I(x) (x)
2050
STVE(stvebx, stb, I, u8)
2051
STVE(stvehx, stw, bswap16, u16)
2052
STVE(stvewx, stl, bswap32, u32)
2053
#undef I
2054
#undef LVE
2055

    
2056
/* mtvscr: copy the last 32-bit word (in PowerPC element order) of the
 * source vector into VSCR, then propagate the VSCR[NJ] bit into the
 * softfloat flush-to-zero setting for the vector unit. */
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    /* On LE hosts element order is reversed, so PPC word 3 is u32[0]. */
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}
2065

    
2066
/* vaddcuw: write the carry-out of each unsigned 32-bit lane addition:
 * 1 when a + b would wrap, 0 otherwise. */
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint32_t av = a->u32[i];
        uint32_t bv = b->u32[i];
        /* Carry out iff bv exceeds the headroom left above av. */
        r->u32[i] = (~av < bv);
    }
}
2073

    
2074
/* VARITH_DO: lane-wise modulo arithmetic helper applying "op" to
 * corresponding elements of a and b (no saturation). */
#define VARITH_DO(name, op, element)        \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
{                                                                       \
    int i;                                                              \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
        r->element[i] = a->element[i] op b->element[i];                 \
    }                                                                   \
}
/* Instantiate modulo add/sub pairs for 8/16/32-bit lanes. */
#define VARITH(suffix, element)                  \
  VARITH_DO(add##suffix, +, element)             \
  VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH
2090

    
2091
/* VARITHFP: lane-wise float32 arithmetic; NaN operands are quietened
 * and stored by HANDLE_NAN2 instead of performing the operation. */
#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP
2104

    
2105
/* One saturating lane operation: compute in the wider "type", then
 * narrow with the matching cvt helper, which records saturation. */
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

/* VARITHSAT_DO: lane-wise saturating add/sub helper; sets VSCR[SAT]
 * when any lane saturated. */
#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)    \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)       \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)     \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
2143

    
2144
/* VAVG_DO: lane-wise rounded average, computed in a type twice as
 * wide so the sum plus the +1 rounding bias cannot overflow. */
#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

/* Signed and unsigned average pairs for 8/16/32-bit lanes. */
#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
2162

    
2163
/* VCF: vcfux/vcfsx — convert each unsigned/signed 32-bit integer lane
 * to float32 and scale the result by 2**-uim. */
#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn (t, -uim, &env->vec_status);       \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF
2175

    
2176
/* VCMP_DO: lane-wise compare producing all-ones (match) or all-zeroes
 * (no match) per lane.  The "_dot" (record) variants also set CR6:
 * bit 3 when every lane matched, bit 1 when none did. */
#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
            switch (sizeof (a->element[0])) {                           \
            case 4: r->u32[i] = result; break;                          \
            case 2: r->u16[i] = result; break;                          \
            case 1: r->u8[i] = result; break;                           \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP
2211

    
2212
/* VCMPFP_DO: lane-wise float32 compare; unordered lanes always
 * compare false.  "gefp" is expressed as (rel != less) so both equal
 * and greater match.  Record variants set CR6 as in VCMP_DO. */
#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            uint32_t result;                                            \
            int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)           \
    VCMPFP_DO(suffix, compare, order, 0)         \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP
2245

    
2246
/* vcmpbfp core: per-lane "bounds" compare.  Bit 31 of each result
 * lane is set when a > b, bit 30 when a < -b; unordered lanes set
 * both (0xc0000000).  When "record" is set, CR6 bit 1 reports whether
 * every ordered lane was in bounds (both bits clear). */
static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                                    int record)
{
    int i;
    int all_in = 0;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here.  */
        } else {
            /* Compare a against -b for the lower bound. */
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;
            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}
2269

    
2270
/* vcmpbfp: bounds compare without CR6 update. */
void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 0);
}
2274

    
2275
/* vcmpbfp.: recording variant — also sets CR6. */
void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 1);
}
2279

    
2280
/* VCT: vctuxs/vctsxs — scale each float32 lane by 2**uim, convert to
 * a 32-bit integer with saturation (via the satcvt helper), setting
 * VSCR[SAT] when any lane saturated.  NaN inputs produce 0.  The
 * conversion uses round-toward-zero on a local copy of the FP status
 * so the rounding-mode change does not leak into env->vec_status. */
#define VCT(suffix, satcvt, element)                                    \
    void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(float_round_to_zero, &s);               \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            if (float32_is_nan(b->f[i]) ||                              \
                float32_is_signaling_nan(b->f[i])) {                    \
                r->element[i] = 0;                                      \
            } else {                                                    \
                float64 t = float32_to_float64(b->f[i], &s);            \
                int64_t j;                                              \
                t = float64_scalbn(t, uim, &s);                         \
                j = float64_to_int64(t, &s);                            \
                r->element[i] = satcvt(j, &sat);                        \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
2306

    
2307
/* vmaddfp: per-lane multiply-add r = a*c + b.  The arithmetic is done
 * in float64 so only one rounding happens, at the final narrowing;
 * NaN operands are quietened and propagated by HANDLE_NAN3. */
void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            float64 x, y, z, acc;
            x = float32_to_float64(a->f[i], &env->vec_status);
            y = float32_to_float64(b->f[i], &env->vec_status);
            z = float32_to_float64(c->f[i], &env->vec_status);
            acc = float64_mul(x, z, &env->vec_status);
            acc = float64_add(acc, y, &env->vec_status);
            r->f[i] = float64_to_float32(acc, &env->vec_status);
        }
    }
}
2324

    
2325
/* vmhaddshs: multiply-high-and-add with signed saturation:
 * r = sat16(c + ((a*b) >> 15)) per 16-bit lane; sets VSCR[SAT] when
 * any lane saturated. */
void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t sum = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh(sum, &sat);
    }
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2340

    
2341
/* vmhraddshs: like vmhaddshs but rounded:
 * r = sat16(c + ((a*b + 0x4000) >> 15)) per 16-bit lane; sets
 * VSCR[SAT] when any lane saturated. */
void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t sum = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh(sum, &sat);
    }
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2356

    
2357
/* VMINMAX_DO: lane-wise min/max — keep a's element unless the
 * comparison against b's holds (min uses >, max uses <). */
#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
  VMINMAX_DO(min##suffix, >, element)           \
  VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX
2380

    
2381
/* VMINMAXFP: float32 lane-wise min/max.  When a < b (quiet compare),
 * rT's element is chosen, otherwise rF's: minfp instantiates (a, b),
 * maxfp instantiates (b, a).  NaN operands are quietened and stored
 * by HANDLE_NAN2 before the comparison is reached. */
#define VMINMAXFP(suffix, rT, rF)                                       \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
                    r->f[i] = rT->f[i];                                 \
                } else {                                                \
                    r->f[i] = rF->f[i];                                 \
                }                                                       \
            }                                                           \
        }                                                               \
    }
VMINMAXFP(minfp, a, b)
VMINMAXFP(maxfp, b, a)
#undef VMINMAXFP
2398

    
2399
/* vmladduhm: modulo multiply-low-and-add, r = (a*b + c) mod 2^16 per
 * 16-bit lane. */
void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t sum = a->s16[i] * b->s16[i] + c->s16[i];
        r->s16[i] = (int16_t)sum;
    }
}
2407

    
2408
/* VMRG_DO: merge-high/merge-low helpers, interleaving elements of a
 * and b into a temporary (so r may alias a or b).  MRGHI/MRGLO flip
 * with host byte order, which is why mrgl pairs with MRGHI and mrgh
 * with MRGLO below. */
#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        for (i = 0; i < n_elems/2; i++) {                               \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
  VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO
2442

    
2443
/* vmsummbm: mixed-sign multiply-sum — each 32-bit lane gets c plus
 * the four signed-byte * unsigned-byte products that overlap it. */
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i, j;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        int32_t acc = c->s32[i];
        for (j = 0; j < 4; j++) {
            acc += prod[4*i + j];
        }
        r->s32[i] = acc;
    }
}
2456

    
2457
/* vmsumshm: signed 16-bit multiply-sum, modulo — each 32-bit lane
 * gets c plus the two products that overlap it. */
void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        int32_t acc = c->s32[i];
        acc += prod[2*i];
        acc += prod[2*i+1];
        r->s32[i] = acc;
    }
}
2470

    
2471
/* vmsumshs: signed 16-bit multiply-sum with saturation — the 64-bit
 * lane sum is narrowed to int32 with cvtsdsw, and VSCR[SAT] is set
 * when any lane saturated. */
void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        int64_t sum = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtsdsw(sum, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2490

    
2491
/* vmsumubm: unsigned 8-bit multiply-sum, modulo — each 32-bit lane
 * gets c plus the four byte products that overlap it. */
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i, j;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        uint32_t acc = c->u32[i];
        for (j = 0; j < 4; j++) {
            acc += prod[4*i + j];
        }
        r->u32[i] = acc;
    }
}
2504

    
2505
/* vmsumuhm: unsigned 16-bit multiply-sum, modulo — each 32-bit lane
 * gets c plus the two halfword products that overlap it. */
void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        uint32_t acc = c->u32[i];
        acc += prod[2*i];
        acc += prod[2*i+1];
        r->u32[i] = acc;
    }
}
2518

    
2519
/* vmsumuhs: unsigned 16-bit multiply-sum with saturation — the
 * 64-bit lane sum is narrowed to uint32 with cvtuduw, and VSCR[SAT]
 * is set when any lane saturated. */
void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        uint64_t sum = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtuduw(sum, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2538

    
2539
/* VMUL_DO: even/odd lane multiplies (vmule.., vmulo..) producing
 * double-width products; HI_IDX/LO_IDX pick the even or odd source
 * element of each pair independent of host byte order. */
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element) \
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
2560

    
2561
/* vnmsubfp: per-lane negative multiply-subtract, r = -(a*c - b).  The
 * arithmetic is done in float64 so only one rounding happens at the
 * final narrowing; NaN operands are quietened by HANDLE_NAN3. */
void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            float64 x, y, z, acc;
            x = float32_to_float64(a->f[i], &env->vec_status);
            y = float32_to_float64(b->f[i], &env->vec_status);
            z = float32_to_float64(c->f[i], &env->vec_status);
            acc = float64_mul(x, z, &env->vec_status);
            acc = float64_sub(acc, y, &env->vec_status);
            acc = float64_chs(acc);
            r->f[i] = float64_to_float32(acc, &env->vec_status);
        }
    }
}
2579

    
2580
/* vperm: gather 16 bytes from the 32-byte concatenation a:b according
 * to the low 5 bits of each selector byte in c; a temporary is used
 * so r may alias any source operand. */
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;

    VECTOR_FOR_INORDER_I (i, u8) {
        int sel = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int index = sel & 0xf;
#else
        int index = 15 - (sel & 0xf);
#endif
        /* Selector bit 4 picks from b, otherwise from a. */
        result.u8[i] = (sel & 0x10) ? b->u8[index] : a->u8[index];
    }
    *r = result;
}
2599

    
2600
/* PKBIG: operand ordering flag for the pack helpers below. */
#if defined(HOST_WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
/* vpkpx: pack eight 32-bit pixels from a:b into eight 16-bit pixels,
 * keeping the top bit of each 8-bit channel plus the next 5 bits
 * (1:5:5:5 format). */
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(HOST_WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I (i, u64) {
        VECTOR_FOR_INORDER_I (j, u32){
            uint32_t e = x[i]->u32[j];
            /* shift each channel down so that 5 bits of it land in the
             * right slot of the 16-bit result */
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}
2625

    
2626
/* vpk*: pack two full-width source vectors into one vector of
 * half-width elements.  a0 fills the high half of the result and a1 the
 * low half (operands swapped on little-endian hosts via PKBIG).  "cvt"
 * narrows one element, reporting saturation through "sat"; saturating
 * variants (dosat != 0) latch VSCR[SAT]. */
#define VPK(suffix, from, to, cvt, dosat)       \
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
        VECTOR_FOR_INORDER_I (i, from) {                                \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
/* I(): identity "conversion" for the modulo (non-saturating) packs. */
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG
2655

    
2656
/* vrefp: per-lane reciprocal estimate, implemented here as an exact
 * 1.0 / b division. */
void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
        }
    }
}
2665

    
2666
/* vrfi{n,m,p,z}: round each lane to an integral float value.  A local
 * copy of vec_status is used so the forced rounding mode does not leak
 * into subsequent operations. */
#define VRFI(suffix, rounding)                                          \
    void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(rounding, &s);                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN1(r->f[i], b->f[i]) {                             \
                r->f[i] = float32_round_to_int (b->f[i], &s);           \
            }                                                           \
        }                                                               \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI
2683

    
2684
#define VROTATE(suffix, element)                                        \
2685
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
2686
    {                                                                   \
2687
        int i;                                                          \
2688
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
2689
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2690
            unsigned int shift = b->element[i] & mask;                  \
2691
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2692
        }                                                               \
2693
    }
2694
VROTATE(b, u8)
2695
VROTATE(h, u16)
2696
VROTATE(w, u32)
2697
#undef VROTATE
2698

    
2699
/* vrsqrtefp: per-lane reciprocal square-root estimate, implemented as
 * an exact 1.0 / sqrt(b). */
void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            float32 t = float32_sqrt(b->f[i], &env->vec_status);
            r->f[i] = float32_div(float32_one, t, &env->vec_status);
        }
    }
}
2709

    
2710
/* vsel: bitwise select — where a bit of c is set take the bit from b,
 * otherwise take it from a. */
void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    r->u64[0] = (b->u64[0] & c->u64[0]) | (a->u64[0] & ~c->u64[0]);
    r->u64[1] = (b->u64[1] & c->u64[1]) | (a->u64[1] & ~c->u64[1]);
}
2715

    
2716
/* vlogefp: per-lane base-2 logarithm estimate (computed exactly). */
void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_log2(b->f[i], &env->vec_status);
        }
    }
}
2725

    
2726
#if defined(HOST_WORDS_BIGENDIAN)
#define LEFT 0
#define RIGHT 1
#else
#define LEFT 1
#define RIGHT 0
#endif
/* The specification says that the results are undefined if all of the
 * shift counts are not identical.  We check to make sure that they are
 * to conform to what real hardware appears to do.  */
/* vsl/vsr: shift the whole 128-bit register left/right by 0-7 bits;
 * the count comes from the low 3 bits of each byte of b and the result
 * is left unchanged when the counts disagree. */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
        int doit = 1;                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                *r = *a;                                                \
            } else if (leftp) {                                         \
                /* carry bits cross from the low u64 into the high */   \
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                /* carry bits cross from the high u64 into the low */   \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
            }                                                           \
        }                                                               \
    }
VSHIFT(l, LEFT)
VSHIFT(r, RIGHT)
#undef VSHIFT
#undef LEFT
#undef RIGHT
2764

    
2765
/* vsl{b,h,w}: per-element logical shift left; only the log2(width) low
 * bits of each count are used, so the shift amount is always < width. */
#define VSL(suffix, element)                                            \
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL
2779

    
2780
/* vsldoi: concatenate a:b and take 16 bytes starting "shift" bytes into
 * the pair.  The little-endian branch mirrors the indexing so the
 * architectural (big-endian) byte order is preserved. */
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

#if defined(HOST_WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index-0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index-0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    *r = result;
}
2807

    
2808
/* vslo: shift the whole register left by 0-15 octets; the count comes
 * from bits 6:3 of the last byte of b.  Vacated bytes are zeroed. */
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#else
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#endif
}
2820

    
2821
/* Experimental testing shows that hardware masks the immediate.  */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(HOST_WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
#endif
/* vsplt{b,h,w}: broadcast the element selected by the (masked)
 * immediate into every lane of the result. */
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED
2843

    
2844
/* vspltis{b,h,w}: broadcast a 5-bit signed immediate.  The shift-left /
 * arithmetic shift-right pair sign-extends bit 4 of "splat" through the
 * int8_t intermediate before widening to the element type. */
#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI
2857

    
2858
/* vsr[a]{b,h,w}: per-element shift right; arithmetic vs logical follows
 * the signedness of the element type used to instantiate the macro.
 * Only the log2(width) low bits of each count are used. */
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR
2875

    
2876
/* vsro: shift the whole register right by 0-15 octets; the count comes
 * from bits 6:3 of the last byte of b.  Vacated bytes are zeroed. */
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#else
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#endif
}
2888

    
2889
/* vsubcuw: per-word carry-out of a - b; each lane becomes 1 when the
 * subtraction does not borrow (a >= b), otherwise 0. */
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = (a->u32[i] >= b->u32[i]) ? 1 : 0;
    }
}
2896

    
2897
/* vsumsws: saturating sum of all four signed words of a plus the last
 * word of b, placed in the last word of the result; other lanes are
 * zeroed.  Saturation latches VSCR[SAT]. */
void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2921

    
2922
/* vsum2sws: for each 64-bit half, saturating sum of its two signed
 * words of a plus the corresponding odd word of b; result stored in
 * that half's odd word, even word zeroed. */
void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper+i*2];
        result.u64[i] = 0;
        /* two s32 lanes per u64 half; the bound happens to equal
         * ARRAY_SIZE(r->u64) == 2 */
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2*i+j];
        }
        result.s32[upper+i*2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2947

    
2948
/* vsum4sbs: per word, saturating sum of the four signed bytes of a
 * plus the signed word of b. */
void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4*i+j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2965

    
2966
/* vsum4shs: per word, saturating sum of the two signed halfwords of a
 * plus the signed word of b. */
void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        t += a->s16[2*i] + a->s16[2*i+1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2981

    
2982
/* vsum4ubs: per word, unsigned saturating sum of the four bytes of a
 * plus the word of b. */
void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4*i+j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
2999

    
3000
/* UPKHI/UPKLO: which half of the source the "high"/"low" unpack reads. */
#if defined(HOST_WORDS_BIGENDIAN)
#define UPKHI 1
#define UPKLO 0
#else
#define UPKHI 0
#define UPKLO 1
#endif
/* vupk{h,l}px: unpack 1:5:5:5 pixels into 8:8:8:8, replicating bit 15
 * into the whole alpha byte.  Note the locals deliberately shadow the
 * r/b parameters inside the loop; the outer b is read before the inner
 * declaration, and the result is written through *r only at the end. */
#define VUPKPX(suffix, hi)                                      \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
    {                                                           \
        int i;                                                  \
        ppc_avr_t result;                                       \
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
            uint16_t e = b->u16[hi ? i : i+4];                  \
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
            uint8_t r = (e >> 10) & 0x1f;                       \
            uint8_t g = (e >> 5) & 0x1f;                        \
            uint8_t b = e & 0x1f;                               \
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
        }                                                               \
        *r = result;                                                    \
    }
VUPKPX(lpx, UPKLO)
VUPKPX(hpx, UPKHI)
#undef VUPKPX
3025

    
3026
/* vupk{h,l}{sb,sh}: sign-extend the high (hi) or low half of the packed
 * source into full-width elements. */
#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO

/* End of the AltiVec helper section: drop its private macros. */
#undef DO_HANDLE_NAN
#undef HANDLE_NAN1
#undef HANDLE_NAN2
#undef HANDLE_NAN3
#undef VECTOR_FOR_INORDER_I
#undef HI_IDX
#undef LO_IDX
3057

    
3058
/*****************************************************************************/
3059
/* SPE extension helpers */
3060
/* Use a table to make this quicker.  hbrev[n] is the 4-bit reversal of
 * the nibble n; the table is read-only, so declare it const. */
static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

/* Reverse the bit order of an 8-bit value via two nibble lookups. */
static inline uint8_t byte_reverse(uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

/* Reverse the bit order of a 32-bit value: bit-reverse each byte and
 * swap the byte order. */
static inline uint32_t word_reverse(uint32_t val)
{
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
}
3076

    
3077
#define MASKBITS 16 // Arbitrary mask width - to be fixed (implementation dependent)
/* brinc: bit-reversed increment of the low MASKBITS bits of arg1,
 * restricted to the bit positions selected by arg2; bits above the mask
 * pass through from arg1 unchanged. */
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    /* increment performed in bit-reversed order, then reversed back */
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}
3088

    
3089
/* cntlsw32: count leading sign bits (copies of the top bit) of VAL;
 * negative values are inverted first so clz32 counts their sign run. */
uint32_t helper_cntlsw32 (uint32_t val)
{
    return (val & 0x80000000) ? clz32(~val) : clz32(val);
}

/* cntlzw32: count leading zero bits of VAL. */
uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}
3101

    
3102
/* Single-precision floating-point conversions */
/* efscfsi: signed int -> single-precision float (raw 32-bit result). */
static inline uint32_t efscfsi(uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efscfui: unsigned int -> single-precision float. */
static inline uint32_t efscfui(uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efsctsi: single-precision float -> signed int, NaN forced to 0. */
static inline int32_t efsctsi(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_int32(u.f, &env->vec_status);
}

/* efsctui: single-precision float -> unsigned int, NaN forced to 0. */
static inline uint32_t efsctui(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_uint32(u.f, &env->vec_status);
}

/* efsctsiz: like efsctsi but rounds toward zero. */
static inline uint32_t efsctsiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

/* efsctuiz: like efsctui but rounds toward zero. */
static inline uint32_t efsctuiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}
3168

    
3169
/* efscfsf: signed 32-bit fractional -> float, i.e. val / 2^32. */
static inline uint32_t efscfsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efscfuf: unsigned 32-bit fractional -> float, i.e. val / 2^32. */
static inline uint32_t efscfuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efsctsf: float -> signed fractional (val * 2^32), NaN forced to 0. */
static inline uint32_t efsctsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

/* efsctuf: float -> unsigned fractional (val * 2^32), NaN forced to 0. */
static inline uint32_t efsctuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
3222

    
3223
/* Expose one scalar SPE conversion (e.g. efscfsi) as a TCG helper
 * wrapping the inline implementation above. */
#define HELPER_SPE_SINGLE_CONV(name)                                          \
uint32_t helper_e##name (uint32_t val)                                        \
{                                                                             \
    return e##name(val);                                                      \
}
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
3248

    
3249
/* Expose the vector (two-lane) form of an SPE conversion: apply the
 * scalar conversion independently to the high and low 32-bit halves. */
#define HELPER_SPE_VECTOR_CONV(name)                                          \
uint64_t helper_ev##name (uint64_t val)                                       \
{                                                                             \
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
            (uint64_t)e##name(val);                                           \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
3275

    
3276
/* Single-precision floating-point arithmetic */
3277
static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
3278
{
3279
    CPU_FloatU u1, u2;
3280
    u1.l = op1;
3281
    u2.l = op2;
3282
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3283
    return u1.l;
3284
}
3285

    
3286
static inline uint32_t efssub(uint32_t op1, uint32_t op2)
3287
{
3288
    CPU_FloatU u1, u2;
3289
    u1.l = op1;
3290
    u2.l = op2;
3291
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3292
    return u1.l;
3293
}
3294

    
3295
static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
3296
{
3297
    CPU_FloatU u1, u2;
3298
    u1.l = op1;
3299
    u2.l = op2;
3300
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3301
    return u1.l;
3302
}
3303

    
3304
static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
3305
{
3306
    CPU_FloatU u1, u2;
3307
    u1.l = op1;
3308
    u2.l = op2;
3309
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3310
    return u1.l;
3311
}
3312

    
3313
/* Expose one scalar SPE arithmetic op as a TCG helper. */
#define HELPER_SPE_SINGLE_ARITH(name)                                         \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);
3326

    
3327
/* Vector (two-lane) form of an SPE arithmetic op: apply the scalar op
 * independently to the high and low 32-bit halves. */
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
            (uint64_t)e##name(op1, op2);                                      \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
3341

    
3342
/* Single-precision floating-point comparisons */
/* efststlt: 4 when op1 < op2, else 0. */
static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

/* efststgt: 4 when op1 > op2 (i.e. not op1 <= op2), else 0. */
static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

/* efststeq: 4 when op1 == op2, else 0. */
static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

/* efscmp*: currently identical to the efstst* "test" forms. */
static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststlt(op1, op2);
}

static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststgt(op1, op2);
}

static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststeq(op1, op2);
}
3384

    
3385
/* Expose one scalar SPE compare as a TCG helper.
 * NOTE(review): e##name already returns 4 or 0, so the extra "<< 2"
 * yields 16 or 0, which cannot fit a 4-bit CR field; later QEMU
 * versions return the value unshifted.  Verify against how the
 * translator consumes this result before changing it. */
#define HELPER_SINGLE_SPE_CMP(name)                                           \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2) << 2;                                            \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
3402

    
3403
/* Merge the high-lane (t0) and low-lane (t1) compare results into one
 * CR-field-shaped nibble: t0, t1, t0|t1, t0&t1 from MSB to LSB. */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
3407

    
3408
/* Expand a two-element (vector) SPE comparison helper: run the scalar
 * primitive on the high and low 32-bit halves and combine the results
 * with evcmp_merge().
 * NOTE(review): the primitives return 4/0 while evcmp_merge()'s formula
 * produces the canonical 4-bit encoding only for 0/1 inputs — verify
 * the merged value against the SPE programming model. */
#define HELPER_VECTOR_SPE_CMP(name)                                           \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
3425

    
3426
/* Double-precision floating-point conversion */
3427
uint64_t helper_efdcfsi (uint32_t val)
3428
{
3429
    CPU_DoubleU u;
3430

    
3431
    u.d = int32_to_float64(val, &env->vec_status);
3432

    
3433
    return u.ll;
3434
}
3435

    
3436
uint64_t helper_efdcfsid (uint64_t val)
3437
{
3438
    CPU_DoubleU u;
3439

    
3440
    u.d = int64_to_float64(val, &env->vec_status);
3441

    
3442
    return u.ll;
3443
}
3444

    
3445
uint64_t helper_efdcfui (uint32_t val)
3446
{
3447
    CPU_DoubleU u;
3448

    
3449
    u.d = uint32_to_float64(val, &env->vec_status);
3450

    
3451
    return u.ll;
3452
}
3453

    
3454
uint64_t helper_efdcfuid (uint64_t val)
3455
{
3456
    CPU_DoubleU u;
3457

    
3458
    u.d = uint64_to_float64(val, &env->vec_status);
3459

    
3460
    return u.ll;
3461
}
3462

    
3463
uint32_t helper_efdctsi (uint64_t val)
3464
{
3465
    CPU_DoubleU u;
3466

    
3467
    u.ll = val;
3468
    /* NaN are not treated the same way IEEE 754 does */
3469
    if (unlikely(float64_is_nan(u.d)))
3470
        return 0;
3471

    
3472
    return float64_to_int32(u.d, &env->vec_status);
3473
}
3474

    
3475
uint32_t helper_efdctui (uint64_t val)
3476
{
3477
    CPU_DoubleU u;
3478

    
3479
    u.ll = val;
3480
    /* NaN are not treated the same way IEEE 754 does */
3481
    if (unlikely(float64_is_nan(u.d)))
3482
        return 0;
3483

    
3484
    return float64_to_uint32(u.d, &env->vec_status);
3485
}
3486

    
3487
uint32_t helper_efdctsiz (uint64_t val)
3488
{
3489
    CPU_DoubleU u;
3490

    
3491
    u.ll = val;
3492
    /* NaN are not treated the same way IEEE 754 does */
3493
    if (unlikely(float64_is_nan(u.d)))
3494
        return 0;
3495

    
3496
    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
3497
}
3498

    
3499
uint64_t helper_efdctsidz (uint64_t val)
3500
{
3501
    CPU_DoubleU u;
3502

    
3503
    u.ll = val;
3504
    /* NaN are not treated the same way IEEE 754 does */
3505
    if (unlikely(float64_is_nan(u.d)))
3506
        return 0;
3507

    
3508
    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
3509
}
3510

    
3511
uint32_t helper_efdctuiz (uint64_t val)
3512
{
3513
    CPU_DoubleU u;
3514

    
3515
    u.ll = val;
3516
    /* NaN are not treated the same way IEEE 754 does */
3517
    if (unlikely(float64_is_nan(u.d)))
3518
        return 0;
3519

    
3520
    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
3521
}
3522

    
3523
uint64_t helper_efdctuidz (uint64_t val)
3524
{
3525
    CPU_DoubleU u;
3526

    
3527
    u.ll = val;
3528
    /* NaN are not treated the same way IEEE 754 does */
3529
    if (unlikely(float64_is_nan(u.d)))
3530
        return 0;
3531

    
3532
    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
3533
}
3534

    
3535
uint64_t helper_efdcfsf (uint32_t val)
3536
{
3537
    CPU_DoubleU u;
3538
    float64 tmp;
3539

    
3540
    u.d = int32_to_float64(val, &env->vec_status);
3541
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3542
    u.d = float64_div(u.d, tmp, &env->vec_status);
3543

    
3544
    return u.ll;
3545
}
3546

    
3547
uint64_t helper_efdcfuf (uint32_t val)
3548
{
3549
    CPU_DoubleU u;
3550
    float64 tmp;
3551

    
3552
    u.d = uint32_to_float64(val, &env->vec_status);
3553
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3554
    u.d = float64_div(u.d, tmp, &env->vec_status);
3555

    
3556
    return u.ll;
3557
}
3558

    
3559
uint32_t helper_efdctsf (uint64_t val)
3560
{
3561
    CPU_DoubleU u;
3562
    float64 tmp;
3563

    
3564
    u.ll = val;
3565
    /* NaN are not treated the same way IEEE 754 does */
3566
    if (unlikely(float64_is_nan(u.d)))
3567
        return 0;
3568
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3569
    u.d = float64_mul(u.d, tmp, &env->vec_status);
3570

    
3571
    return float64_to_int32(u.d, &env->vec_status);
3572
}
3573

    
3574
uint32_t helper_efdctuf (uint64_t val)
3575
{
3576
    CPU_DoubleU u;
3577
    float64 tmp;
3578

    
3579
    u.ll = val;
3580
    /* NaN are not treated the same way IEEE 754 does */
3581
    if (unlikely(float64_is_nan(u.d)))
3582
        return 0;
3583
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3584
    u.d = float64_mul(u.d, tmp, &env->vec_status);
3585

    
3586
    return float64_to_uint32(u.d, &env->vec_status);
3587
}
3588

    
3589
uint32_t helper_efscfd (uint64_t val)
3590
{
3591
    CPU_DoubleU u1;
3592
    CPU_FloatU u2;
3593

    
3594
    u1.ll = val;
3595
    u2.f = float64_to_float32(u1.d, &env->vec_status);
3596

    
3597
    return u2.l;
3598
}
3599

    
3600
uint64_t helper_efdcfs (uint32_t val)
3601
{
3602
    CPU_DoubleU u2;
3603
    CPU_FloatU u1;
3604

    
3605
    u1.l = val;
3606
    u2.d = float32_to_float64(u1.f, &env->vec_status);
3607

    
3608
    return u2.ll;
3609
}
3610

    
3611
/* Double precision fixed-point arithmetic */
3612
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
3613
{
3614
    CPU_DoubleU u1, u2;
3615
    u1.ll = op1;
3616
    u2.ll = op2;
3617
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
3618
    return u1.ll;
3619
}
3620

    
3621
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
3622
{
3623
    CPU_DoubleU u1, u2;
3624
    u1.ll = op1;
3625
    u2.ll = op2;
3626
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
3627
    return u1.ll;
3628
}
3629

    
3630
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
3631
{
3632
    CPU_DoubleU u1, u2;
3633
    u1.ll = op1;
3634
    u2.ll = op2;
3635
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
3636
    return u1.ll;
3637
}
3638

    
3639
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
3640
{
3641
    CPU_DoubleU u1, u2;
3642
    u1.ll = op1;
3643
    u2.ll = op2;
3644
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
3645
    return u1.ll;
3646
}
3647

    
3648
/* Double precision floating point helpers */
3649
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
3650
{
3651
    CPU_DoubleU u1, u2;
3652
    u1.ll = op1;
3653
    u2.ll = op2;
3654
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3655
}
3656

    
3657
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
3658
{
3659
    CPU_DoubleU u1, u2;
3660
    u1.ll = op1;
3661
    u2.ll = op2;
3662
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
3663
}
3664

    
3665
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
3666
{
3667
    CPU_DoubleU u1, u2;
3668
    u1.ll = op1;
3669
    u2.ll = op2;
3670
    return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3671
}
3672

    
3673
/* efdcmplt: double-precision compare "less than".  Alias of the
 * test-only variant; special operands are not yet handled (see XXX). */
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(op1, op2);
}
3678

    
3679
/* efdcmpgt: double-precision compare "greater than".  Alias of the
 * test-only variant; special operands are not yet handled (see XXX). */
uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(op1, op2);
}
3684

    
3685
/* efdcmpeq: double-precision compare "equal".  Alias of the test-only
 * variant; special operands are not yet handled (see XXX). */
uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(op1, op2);
}
3690

    
3691
/*****************************************************************************/
3692
/* Softmmu support */
3693
#if !defined (CONFIG_USER_ONLY)
3694

    
3695
#define MMUSUFFIX _mmu
3696

    
3697
#define SHIFT 0
3698
#include "softmmu_template.h"
3699

    
3700
#define SHIFT 1
3701
#include "softmmu_template.h"
3702

    
3703
#define SHIFT 2
3704
#include "softmmu_template.h"
3705

    
3706
#define SHIFT 3
3707
#include "softmmu_template.h"
3708

    
3709
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                /* Re-synchronize the CPU state from the faulting TB before
                   delivering the exception */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* cpu_ppc_handle_mmu_fault() set exception_index/error_code;
           this longjmps back to the CPU loop and never returns here */
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
3740

    
3741
/* Segment registers load and store */
/* Read segment register 'sr_num'.  On 64-bit MMU models the value is
 * produced by ppc_load_sr(); otherwise the sr[] array is read directly. */
target_ulong helper_load_sr (target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64)
        return ppc_load_sr(env, sr_num);
#endif
    return env->sr[sr_num];
}
3750

    
3751
/* Write segment register 'sr_num' via ppc_store_sr(). */
void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}
3755

    
3756
/* SLB management */
3757
#if defined(TARGET_PPC64)
3758
/* Read SLB entry 'slb_nr' (slbmfee/slbmfev support). */
target_ulong helper_load_slb (target_ulong slb_nr)
{
    return ppc_load_slb(env, slb_nr);
}
3762

    
3763
/* Write an SLB entry from the rb/rs operand pair (slbmte). */
void helper_store_slb (target_ulong rb, target_ulong rs)
{
    ppc_store_slb(env, rb, rs);
}
3767

    
3768
/* slbia: invalidate all SLB entries. */
void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}
3772

    
3773
/* slbie: invalidate the SLB entry matching 'addr'. */
void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
3777

    
3778
#endif /* defined(TARGET_PPC64) */
3779

    
3780
/* TLB management */
3781
/* tlbia: invalidate the whole TLB. */
void helper_tlbia (void)
{
    ppc_tlb_invalidate_all(env);
}
3785

    
3786
/* tlbie: invalidate the TLB entry covering 'addr'. */
void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
3790

    
3791
/* Software driven TLBs management */
3792
/* PowerPC 602/603 software TLB load instructions helpers */
3793
/* Load one entry of the 602/603 software-managed TLB from the SPRs
 * filled in by the TLB-miss exception handler: RPA holds PTE1,
 * ICMP/DCMP hold PTE0 and IMISS/DMISS the faulting EPN.  is_code
 * selects the instruction TLB over the data TLB. */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    /* The way to replace is taken from SRR1 bit 17 */
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3814

    
3815
/* tlbld: load a data TLB entry (602/603). */
void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}
3819

    
3820
/* tlbli: load an instruction TLB entry (602/603). */
void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}
3824

    
3825
/* PowerPC 74xx software TLB load instructions helpers */
3826
/* Load one entry of the 74xx software-managed TLB: PTELO/PTEHI hold the
 * PTE words, and TLBMISS holds the faulting EPN with the way encoded in
 * its low two bits.  is_code selects the instruction TLB over the data
 * TLB. */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
3842

    
3843
/* tlbld: load a data TLB entry (74xx). */
void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}
3847

    
3848
/* tlbli: load an instruction TLB entry (74xx). */
void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}
3852

    
3853
static inline target_ulong booke_tlb_to_page_size(int size)
3854
{
3855
    return 1024 << (2 * size);
3856
}
3857

    
3858
/* Inverse of booke_tlb_to_page_size(): map a page size in bytes back to
 * its BookE size field (0x0 for 1 KiB up to 0xA for 1 GiB, and on
 * 64-bit targets up to 0xF for 1 TiB), or -1 when the size is not an
 * encodable 1 KiB * 4^n value. */
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
#if defined (TARGET_PPC64)
    const int max_size = 0xF;
#else
    const int max_size = 0xA;
#endif
    int size;

    for (size = 0; size <= max_size; size++) {
        if (page_size == ((target_ulong)1024 << (2 * size))) {
            return size;
        }
    }
    return -1;
}
3920

    
3921
/* Helpers for 4xx TLB management */
3922
/* 4xx tlbre (lo word): return entry's EPN with the valid bit (0x400)
 * and 3-bit size field (bits 7..9), and deposit the entry's PID into
 * the 40x PID SPR as an architectural side effect. */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID)
        ret |= 0x400;
    size = booke_page_size_to_tlb(tlb->size);
    if (size < 0 || size > 0x7)
        size = 1;  /* unencodable size: fall back to field value 1 (4 KiB) */
    ret |= size << 7;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}
3940

    
3941
/* 4xx tlbre (hi word): return the entry's RPN with the execute (0x200)
 * and write (0x100) permission bits folded in. */
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb = &env->tlb[entry & 0x3F].tlbe;
    target_ulong ret = tlb->RPN;

    if (tlb->prot & PAGE_EXEC) {
        ret |= 0x200;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= 0x100;
    }
    return ret;
}
3955

    
3956
/* 4xx tlbwe (hi word): program entry's EPN, page size and valid bit
 * from 'val', flushing the QEMU TLB pages covered by both the old and
 * the new mapping so no stale translation survives the update. */
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
    /* Size field is bits 7..9; 0x40 is the valid bit, 0x20 little-endian */
    tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    /* EPN is aligned down to the page size */
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & 0x40) {
        tlb->prot |= PAGE_VALID;
        if (val & 0x20) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
}
4011

    
4012
/* 4xx tlbwe (lo word): program entry's attributes, RPN and access
 * permissions from 'val'.  Read permission is always granted; 0x200
 * adds execute and 0x100 adds write permission.
 * NOTE(review): "tlb->prot = PAGE_READ" overwrites the whole prot
 * field, dropping PAGE_VALID if helper_4xx_tlbwe_hi set it earlier —
 * confirm the intended ordering of the two tlbwe words. */
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    tlb->attr = val & 0xFF;
    tlb->RPN = val & 0xFFFFFC00;
    tlb->prot = PAGE_READ;
    if (val & 0x200)
        tlb->prot |= PAGE_EXEC;
    if (val & 0x100)
        tlb->prot |= PAGE_WRITE;
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
4035

    
4036
/* 4xx tlbsx: search the TLB for 'address' under the current 40x PID. */
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
4040

    
4041
/* PowerPC 440 TLB management */
4042
/* 440 tlbwe: write one word (0..2) of TLB entry 'entry'.
 * Word 0 carries EPN, size and the valid bit; word 1 the RPN; word 2
 * attributes and permission bits.  The QEMU TLB is flushed whenever a
 * change could invalidate previously cached translations. */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        /* Moving a valid entry requires flushing its old translations */
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        /* Growing a valid entry also invalidates cached translations */
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        /* The PID comes from MMUCR's low byte, not from the operand */
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        /* Keep only the valid bit, then rebuild the permission bits; the
           "<< 4" group appears to be the second privilege level's
           permissions — confirm against the 440 UTLB word-2 layout */
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}
4103

    
4104
/* 440 tlbre: read back one word (0..2) of TLB entry 'entry', mirroring
 * helper_440_tlbwe's encoding.  Reading word 0 also deposits the
 * entry's PID into the low byte of MMUCR. */
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;  /* unencodable size: report field value 1 (4 KiB) */
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        /* Reassemble the permission bits stored in prot (see tlbwe) */
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}
4149

    
4150
/* 440 tlbsx: search the TLB for 'address' under the PID in MMUCR. */
target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
4154

    
4155
#endif /* !CONFIG_USER_ONLY */