/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <string.h>
#include "exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"

//#define DEBUG_OP
//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

#ifdef DEBUG_SOFTWARE_TLB
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SWTLB(...) do { } while (0)
#endif


/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}

/*****************************************************************************/
/* SPR accesses */
void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

target_ulong helper_load_tbl (void)
{
    return (target_ulong)cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return (target_ulong)cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}

#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
                 val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif

/*****************************************************************************/
/* Memory load and stores */

static inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
        if (!msr_sf)
            return (uint32_t)(addr + arg);
        else
#endif
            return addr + arg;
}
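
/* In 32-bit mode (MSR[SF] clear on a 64-bit implementation) effective
 * addresses wrap modulo 2^32: addr_add(0xFFFFFFFC, 8) yields 0x00000004,
 * not 0x100000004.
 */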

void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            env->gpr[reg] = bswap32(ldl(addr));
        else
            env->gpr[reg] = ldl(addr);
        addr = addr_add(addr, 4);
    }
}

void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        else
            stl(addr, (uint32_t)env->gpr[reg]);
        addr = addr_add(addr, 4);
    }
}

void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}
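
/* A residual count of 1-3 bytes is left-justified in the last target
 * register and zero-padded: loading the single byte 0xAB leaves
 * 0xAB000000 in that register.
 */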

/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
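
/* Note: the overlap test above does not model wrap-around past r31; a
 * transfer that starts near r31 and continues at r0 can still load rA
 * or rB without being flagged as an invalid form.
 */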

void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}

static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        stl(addr + i, 0);
    }
    if (env->reserve_addr == addr)
        env->reserve_addr = (target_ulong)-1ULL;
}

void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}
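
/* On the PowerPC 970, dcbz zeroes a 32-byte block instead of a full
 * cache line when the two HID5 bits tested above encode the value 1,
 * matching the chip's compatibility mode for software that assumes
 * 32-byte lines.
 */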

void helper_icbi(target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line:
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}

// XXX: to be tested
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;
    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
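
/* lscbx is a POWER/601 bridge instruction: it loads up to XER[bc] bytes
 * into successive registers, stops early after storing a byte that
 * matches the XER comparison byte, and returns the number of bytes
 * actually transferred (the translator writes this back into XER).
 */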

/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif
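
/* Strictly, a signed 64x64 multiply overflows unless the high doubleword
 * equals the sign extension of the low one; the th == 0 / th == -1 test
 * above is a cheaper approximation that misclassifies a few boundary
 * products (e.g. th == 0 with bit 63 of tl set).
 */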

target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif

/* shift right arithmetic helper */
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1U << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}
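
/* XER[CA] after an arithmetic right shift is set iff the (negative)
 * result had any 1 bits shifted out, i.e. when truncation moved the
 * result toward minus infinity: e.g. sraw(-5, 1) = -3 with CA = 1.
 */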

#if defined(TARGET_PPC64)
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            if (likely(ret >= 0 || (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif

target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif
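
/* popcntb computes a per-byte population count using the usual SWAR
 * ladder: adjacent 1-bit fields are summed into 2-bit fields, then 2-bit
 * into 4-bit, then 4-bit into 8-bit, so each byte of the result holds
 * the number of 1 bits in the corresponding input byte, e.g.
 * helper_popcntb(0x000000FF) == 0x00000008.
 */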

/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}

uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
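
/* The five-bit FPRF values produced above follow the architected
 * C,FL,FG,FE,FU encoding: 0x11 quiet NaN, 0x09/0x05 -/+ infinity,
 * 0x12/0x02 -/+ zero, 0x18/0x14 -/+ denormal, 0x08/0x04 -/+ normal.
 */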

/* Floating-point invalid operations exception */
static inline uint64_t fload_invalid_op_excp(int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}

static inline void float_zero_divide_excp(void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

static inline void float_overflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static inline void float_underflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static inline void float_inexact_excp(void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

static inline void fpscr_set_rounding_mode(void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}

void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            break;
796
        case FPSCR_OX:
797
            env->fpscr |= 1 << FPSCR_FX;
798
            if (fpscr_oe)
799
                goto raise_oe;
800
            break;
801
        case FPSCR_UX:
802
            env->fpscr |= 1 << FPSCR_FX;
803
            if (fpscr_ue)
804
                goto raise_ue;
805
            break;
806
        case FPSCR_ZX:
807
            env->fpscr |= 1 << FPSCR_FX;
808
            if (fpscr_ze)
809
                goto raise_ze;
810
            break;
811
        case FPSCR_XX:
812
            env->fpscr |= 1 << FPSCR_FX;
813
            if (fpscr_xe)
814
                goto raise_xe;
815
            break;
816
        case FPSCR_VXSNAN:
817
        case FPSCR_VXISI:
818
        case FPSCR_VXIDI:
819
        case FPSCR_VXZDZ:
820
        case FPSCR_VXIMZ:
821
        case FPSCR_VXVC:
822
        case FPSCR_VXSOFT:
823
        case FPSCR_VXSQRT:
824
        case FPSCR_VXCVI:
825
            env->fpscr |= 1 << FPSCR_VX;
826
            env->fpscr |= 1 << FPSCR_FX;
827
            if (fpscr_ve != 0)
828
                goto raise_ve;
829
            break;
830
        case FPSCR_VE:
831
            if (fpscr_vx != 0) {
832
            raise_ve:
833
                env->error_code = POWERPC_EXCP_FP;
834
                if (fpscr_vxsnan)
835
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
836
                if (fpscr_vxisi)
837
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
838
                if (fpscr_vxidi)
839
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
840
                if (fpscr_vxzdz)
841
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
842
                if (fpscr_vximz)
843
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
844
                if (fpscr_vxvc)
845
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
846
                if (fpscr_vxsoft)
847
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
848
                if (fpscr_vxsqrt)
849
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
850
                if (fpscr_vxcvi)
851
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
852
                goto raise_excp;
853
            }
854
            break;
855
        case FPSCR_OE:
856
            if (fpscr_ox != 0) {
857
            raise_oe:
858
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
859
                goto raise_excp;
860
            }
861
            break;
862
        case FPSCR_UE:
863
            if (fpscr_ux != 0) {
864
            raise_ue:
865
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
866
                goto raise_excp;
867
            }
868
            break;
869
        case FPSCR_ZE:
870
            if (fpscr_zx != 0) {
871
            raise_ze:
872
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
873
                goto raise_excp;
874
            }
875
            break;
876
        case FPSCR_XE:
877
            if (fpscr_xx != 0) {
878
            raise_xe:
879
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
880
                goto raise_excp;
881
            }
882
            break;
883
        case FPSCR_RN1:
884
        case FPSCR_RN:
885
            fpscr_set_rounding_mode();
886
            break;
887
        default:
888
            break;
889
        raise_excp:
890
            /* Update the floating-point enabled exception summary */
891
            env->fpscr |= 1 << FPSCR_FEX;
892
                /* We have to update Rc1 before raising the exception */
893
            env->exception_index = POWERPC_EXCP_PROGRAM;
894
            break;
895
        }
896
    }
897
}

void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode();
}

void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}
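
/* Enabled floating-point exceptions are deferred in this model: the
 * arithmetic helpers record the event in FPSCR and leave exception_index
 * pending, the target FPR is written, and the translator then calls
 * helper_float_check_status(), which raises the program interrupt only
 * if MSR[FE0] or MSR[FE1] is set.
 */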

#ifdef CONFIG_SOFTFLOAT
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif

/* fadd - fadd. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                      float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN subtraction */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                      float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
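
/* Under USE_PRECISE_EMULATION the arithmetic helpers above screen for the
 * architected invalid-operation forms before calling softfloat: inf - inf
 * (VXISI), inf / inf (VXIDI), 0 / 0 (VXZDZ) and inf * 0 (VXIMZ), plus any
 * signaling-NaN input (VXSNAN).
 */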

/* fabs */
uint64_t helper_fabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    return farg.ll;
}

/* fnabs */
uint64_t helper_fnabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fneg */
uint64_t helper_fneg (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fctiw - fctiw. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *      to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *      to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}

#endif

static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}

uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}

/* fmadd - fmadd. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmsub - fmsub. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fnmadd - fnmadd. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}

/* fnmsub - fnmsub. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
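
/* A fused multiply-add rounds only once; the float128 path above gets
 * that right by computing the product exactly in 128-bit precision.
 * The fallback paths (the "OK on x86 hosts" native expression and the
 * two-step softfloat variant) round the intermediate product to double
 * first, so results can differ from real hardware in the last bit.
 */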

/* frsp - frsp. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN rounding to single precision */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
#else
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);
#endif
    return farg.ll;
}

/* fsqrt - fsqrt. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fre - fre. */
uint64_t helper_fre (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fres - fres. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* frsqrte - frsqrte. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* fsel - fsel. */
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
        return arg2;
    else
        return arg3;
}

void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}

void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
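
/* The four-bit comparison result written to CR[crfD] and FPSCR[FPCC]
 * uses the architected encoding: 0x08 = FL (less than), 0x04 = FG
 * (greater than), 0x02 = FE (equal), 0x01 = FU (unordered).
 */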

#if !defined (CONFIG_USER_ONLY)
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}

static inline void do_rfi(target_ulong nip, target_ulong msr,
                          target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
1643

    
1644
void helper_rfi (void)
1645
{
1646
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1647
           ~((target_ulong)0x783F0000), 1);
1648
}
1649

    
1650
#if defined(TARGET_PPC64)
1651
void helper_rfid (void)
1652
{
1653
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1654
           ~((target_ulong)0x783F0000), 0);
1655
}
1656

    
1657
void helper_hrfid (void)
1658
{
1659
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1660
           ~((target_ulong)0x783F0000), 0);
1661
}
1662
#endif
1663
#endif

void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}

#if defined(TARGET_PPC64)
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
}
#endif
1687

    
1688
/*****************************************************************************/
1689
/* PowerPC 601 specific instructions (POWER bridge) */
1690

    
1691
target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
        break;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
        break;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
        break;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
        break;
    default:
        /* Undefined */
        return 0;
        break;
    }
}

target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}

target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}

target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

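/* Worked example for the POWER-bridge divide: with arg1 = 0 and
 * MQ = 100, helper_div forms the 64-bit dividend tmp = 100; dividing
 * by arg2 = 7 returns the quotient 14 and leaves the remainder 2 in
 * MQ, mirroring how the 601 carries the low dividend word in MQ.
 */
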
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}

void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
#endif

/*****************************************************************************/
/* 602 specific instructions */
/* mfrom is the most crazy instruction ever seen, imho ! */
/* Real implementation uses a ROM table. Do the same */
/* Extremely decomposed:
 *                      -arg / 256
 * return 256 * log10(10           + 1.0) + 0.5
 */
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif

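/* Sanity check on the formula above: at arg = 0 it evaluates to
 * 256 * log10(2) + 0.5, roughly 77.5, so the first ROM table entry is
 * expected to be 77 once truncated to an integer.
 */
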
/*****************************************************************************/
/* Embedded PowerPC specific helpers */

/* XXX: to be improved to check access rights when in user-mode */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    uint32_t val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}

void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}

#if !defined(CONFIG_USER_ONLY)
void helper_40x_rfci (void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}

void helper_rfci (void)
{
    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfdi (void)
{
    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfmci (void)
{
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}
#endif

/* 440 specific */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}

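/* Tracing helper_dlmzb: with high = 0x41424344 and low = 0x45004748,
 * every byte of 'high' is non-zero, so the first loop leaves i = 5;
 * the second byte of 'low' is zero, so the second loop stops with
 * i = 6 (the 1-based position of the zero byte), sets CR0 = 0x8 to
 * flag a match in 'low', and writes 6 into the low XER bits.
 */
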
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
  for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                \
    if (float32_is_nan(x) || float32_is_signaling_nan(x)) {     \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)

/* Saturating arithmetic helpers.  */
#define SATCVT(from, to, from_type, to_type, min, max)                  \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x < (from_type)min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (x > (from_type)max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
#define SATCVTU(from, to, from_type, to_type, min, max)                 \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (x > (from_type)max) {                                       \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU

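/* Example expansions of the saturating converters: cvtshsb(300, &sat)
 * clamps the int16_t 300 to INT8_MAX (127) and sets *sat, while
 * cvtshub(-5, &sat) clamps to the unsigned minimum 0; in-range inputs
 * such as cvtshsb(100, &sat) pass through unchanged and leave *sat alone.
 */
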
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
#define I(x) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE

void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}

void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}

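/* lvsl/lvsr build the permute-control vectors used with vperm for
 * unaligned loads.  E.g. for sh = 3, helper_lvsl fills the register
 * with the bytes 0x03, 0x04, ..., 0x12 in element order, and
 * helper_lvsr with 0x0D, 0x0E, ..., 0x1C.
 */
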
#define STVE(name, access, swap, element)                       \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
        } else {                                                        \
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, stb, I, u8)
STVE(stvehx, stw, bswap16, u16)
STVE(stvewx, stl, bswap32, u32)
#undef I
#undef STVE

void helper_mtvscr (ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}

void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = ~a->u32[i] < b->u32[i];
    }
}

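/* The carry-out trick in helper_vaddcuw: for 32-bit lanes, a->u32[i] +
 * b->u32[i] overflows exactly when b > ~a (i.e. b > 0xFFFFFFFF - a), so
 * e.g. a = 0xFFFFFFFF, b = 1 gives ~a = 0 < 1 and a carry bit of 1.
 */
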
#define VARITH_DO(name, op, element)        \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
{                                                                       \
    int i;                                                              \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
        r->element[i] = a->element[i] op b->element[i];                 \
    }                                                                   \
}
#define VARITH(suffix, element)                  \
  VARITH_DO(add##suffix, +, element)             \
  VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH

#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP

#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)    \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)       \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)     \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED

#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG

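/* The "+ 1" before the shift makes the vector averages round up on a
 * tie: vavgsb of 1 and 2 computes (1 + 2 + 1) >> 1 = 2, and the wider
 * intermediate type (int16_t for byte lanes) keeps the sum from
 * overflowing before the shift.
 */
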
#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn (t, -uim, &env->vec_status);       \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF

#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
            switch (sizeof (a->element[0])) {                           \
            case 4: r->u32[i] = result; break;                          \
            case 2: r->u16[i] = result; break;                          \
            case 1: r->u8[i] = result; break;                           \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP

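/* For the recording (dot) forms, CR6 is written from the aggregate
 * lane results: bit 3 (0x8) when the comparison held in every lane,
 * bit 1 (0x2) when it held in none, and 0 for a mixed result, letting
 * e.g. strlen-style loops test "all equal" or "none equal" in one
 * branch.
 */
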
#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            uint32_t result;                                            \
            int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)           \
    VCMPFP_DO(suffix, compare, order, 0)         \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP

static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                                    int record)
{
    int i;
    int all_in = 0;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here.  */
        } else {
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;
            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}

void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 0);
}

void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 1);
}

#define VCT(suffix, satcvt, element)                                    \
    void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(float_round_to_zero, &s);               \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            if (float32_is_nan(b->f[i]) ||                              \
                float32_is_signaling_nan(b->f[i])) {                    \
                r->element[i] = 0;                                      \
            } else {                                                    \
                float64 t = float32_to_float64(b->f[i], &s);            \
                int64_t j;                                              \
                t = float64_scalbn(t, uim, &s);                         \
                j = float64_to_int64(t, &s);                            \
                r->element[i] = satcvt(j, &sat);                        \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT

void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end.  */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_add(t, bf, &env->vec_status);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}

void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
  VMINMAX_DO(min##suffix, >, element)           \
  VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX

#define VMINMAXFP(suffix, rT, rF)                                       \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
                    r->f[i] = rT->f[i];                                 \
                } else {                                                \
                    r->f[i] = rF->f[i];                                 \
                }                                                       \
            }                                                           \
        }                                                               \
    }
VMINMAXFP(minfp, a, b)
VMINMAXFP(maxfp, b, a)
#undef VMINMAXFP

void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        r->s16[i] = (int16_t) (prod + c->s16[i]);
    }
}

#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        for (i = 0; i < n_elems/2; i++) {                               \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
  VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO

void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}

void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
    }
}

void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}

void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
    }
}

void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

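/* The vmsum* family follows one pattern: form partial products from
 * sub-word lanes, then fold a fixed group of them (four byte products
 * or two halfword products) into each 32-bit lane of c.  E.g. for
 * vmsumubm, word 0 of the result in element order is
 *   c[0] + a0*b0 + a1*b1 + a2*b2 + a3*b3
 * over the first four unsigned bytes.
 */
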
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element) \
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL

void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end.  */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_sub(t, bf, &env->vec_status);
            t = float64_chs(t);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}

void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;
    VECTOR_FOR_INORDER_I (i, u8) {
        int s = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int index = s & 0xf;
#else
        int index = 15 - (s & 0xf);
#endif
        if (s & 0x10) {
            result.u8[i] = b->u8[index];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
    *r = result;
}

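/* Each control byte in c selects one source byte: values 0x00-0x0f
 * index into a, 0x10-0x1f into b.  E.g. a control byte of 0x13 picks
 * byte 3 of b.  Combined with the lvsl helper above, this is the
 * classic AltiVec unaligned-load sequence.
 */
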
#if defined(HOST_WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(HOST_WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I (i, u64) {
        VECTOR_FOR_INORDER_I (j, u32){
            uint32_t e = x[i]->u32[j];
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}

#define VPK(suffix, from, to, cvt, dosat)       \
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
        VECTOR_FOR_INORDER_I (i, from) {                                \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG

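/* helper_vpkpx packs 32-bit 8:8:8:8 pixels down to 16-bit 1:5:5:5,
 * keeping the top 5 bits of each colour channel while the low bit of
 * the byte above them supplies the 1-bit alpha.  E.g. 0x00FF8040
 * becomes ((0xFF >> 3) << 10) | ((0x80 >> 3) << 5) | (0x40 >> 3)
 * = 0x7E08.
 */
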
void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
        }
    }
}

#define VRFI(suffix, rounding)                                          \
    void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(rounding, &s);                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN1(r->f[i], b->f[i]) {                             \
                r->f[i] = float32_round_to_int (b->f[i], &s);           \
            }                                                           \
        }                                                               \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI

#define VROTATE(suffix, element)                                        \
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
        }                                                               \
    }
VROTATE(b, u8)
VROTATE(h, u16)
VROTATE(w, u32)
#undef VROTATE

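/* The mask computed in VROTATE limits the rotate count to the element
 * width: for u8 elements it is (1 << 3) - 1 = 7, so vrlb of 0x81 by 1
 * rotates to 0x03 ((0x81 << 1 | 0x81 >> 7) truncated to a byte).
 */
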
void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            float32 t = float32_sqrt(b->f[i], &env->vec_status);
            r->f[i] = float32_div(float32_one, t, &env->vec_status);
        }
    }
}

void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
    r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
}

void helper_vexptefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_exp2(b->f[i], &env->vec_status);
        }
    }
}

void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_log2(b->f[i], &env->vec_status);
        }
    }
}

#if defined(HOST_WORDS_BIGENDIAN)
#define LEFT 0
#define RIGHT 1
#else
#define LEFT 1
#define RIGHT 0
#endif
/* The specification says that the results are undefined if all of the
 * shift counts are not identical.  We check that they are, to conform
 * to what real hardware appears to do.  */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
        int doit = 1;                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                *r = *a;                                                \
            } else if (leftp) {                                         \
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
            }                                                           \
        }                                                               \
    }
VSHIFT(l, LEFT)
VSHIFT(r, RIGHT)
#undef VSHIFT
#undef LEFT
#undef RIGHT

#define VSL(suffix, element)                                            \
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL

void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

#if defined(HOST_WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index-0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index-0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    *r = result;
}

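/* helper_vsldoi concatenates a:b and takes 16 bytes starting at byte
 * 'sh': with sh = 4, result bytes 0-11 come from a->u8[4..15] and
 * bytes 12-15 from b->u8[0..3] (element order shown for a big-endian
 * host).
 */
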
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#else
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#endif
}

/* Experimental testing shows that hardware masks the immediate.  */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(HOST_WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
#endif
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED

#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI

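/* The (int8_t)(splat << 3) >> 3 dance sign-extends the 5-bit SIMM
 * field: vspltisb with splat = 0x1F broadcasts -1 to every byte, and
 * splat = 0x10 broadcasts -16.
 */
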
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR

void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
  int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
  memmove (&r->u8[sh], &a->u8[0], 16-sh);
  memset (&r->u8[0], 0, sh);
#else
  memmove (&r->u8[0], &a->u8[sh], 16-sh);
  memset (&r->u8[16-sh], 0, sh);
#endif
}

void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = a->u32[i] >= b->u32[i];
    }
}

void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper+i*2];
        result.u64[i] = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2*i+j];
        }
        result.s32[upper+i*2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4*i+j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        t += a->s16[2*i] + a->s16[2*i+1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4*i+j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

#if defined(HOST_WORDS_BIGENDIAN)
#define UPKHI 1
#define UPKLO 0
#else
#define UPKHI 0
#define UPKLO 1
#endif
#define VUPKPX(suffix, hi)                                      \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
    {                                                           \
        int i;                                                  \
        ppc_avr_t result;                                       \
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
            uint16_t e = b->u16[hi ? i : i+4];                  \
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
            uint8_t r = (e >> 10) & 0x1f;                       \
            uint8_t g = (e >> 5) & 0x1f;                        \
            uint8_t b = e & 0x1f;                               \
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
        }                                                               \
        *r = result;                                                    \
    }
VUPKPX(lpx, UPKLO)
VUPKPX(hpx, UPKHI)
#undef VUPKPX

#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO

#undef DO_HANDLE_NAN
#undef HANDLE_NAN1
#undef HANDLE_NAN2
#undef HANDLE_NAN3
#undef VECTOR_FOR_INORDER_I
#undef HI_IDX
#undef LO_IDX

/*****************************************************************************/
/* SPE extension helpers */
/* Use a table to make this quicker */
static uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

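/* hbrev[n] is the 4-bit bit-reverse of n; byte_reverse() below therefore
 * only has to swap the two reversed nibbles: e.g. byte_reverse(0x12) ==
 * 0x48 (00010010b -> 01001000b). */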
static inline uint8_t byte_reverse(uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

static inline uint32_t word_reverse(uint32_t val)
{
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
}

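/* brinc implements bit-reversed (FFT-style) index increments: the masked
 * low bits are incremented as if their bit order were reversed, done below
 * as reverse, add 1, reverse back.  With a 3-bit mask, for instance, 0b100
 * (0b001 reversed) steps to 0b010 (0b010 reversed). */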
#define MASKBITS 16 // Random value - to be fixed (implementation dependent)
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}

uint32_t helper_cntlsw32 (uint32_t val)
{
    if (val & 0x80000000)
        return clz32(~val);
    else
        return clz32(val);
}

uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}

/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

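/* The *sf/*uf helpers below treat the 32-bit operand as a fixed-point
 * fraction: converting from a fraction divides by 2^32 and converting to
 * one multiplies by 2^32, the scale factor being built via the softfloat
 * conversion of 1ULL << 32. */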
static inline uint32_t efscfsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float32_is_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float32_is_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}

#define HELPER_SPE_SINGLE_CONV(name)                                          \
uint32_t helper_e##name (uint32_t val)                                        \
{                                                                             \
    return e##name(val);                                                      \
}
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);

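/* The ev* vector forms below simply apply the scalar conversion to the
 * high and low 32-bit halves of the 64-bit operand and repack the two
 * results. */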
#define HELPER_SPE_VECTOR_CONV(name)                                          \
uint64_t helper_ev##name (uint64_t val)                                       \
{                                                                             \
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
            (uint64_t)e##name(val);                                           \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);

/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                         \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
            (uint64_t)e##name(op1, op2);                                      \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststlt(op1, op2);
}

static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststgt(op1, op2);
}

static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststeq(op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                           \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2) << 2;                                            \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);

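/* evcmp_merge below folds the two per-half compare results (each non-zero
 * when the test holds) into a single value carrying four flags: high half
 * true, low half true, either true, both true -- the CR field layout the
 * SPE vector compares are defined to produce. */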
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

#define HELPER_VECTOR_SPE_CMP(name)                                           \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);

/* Double-precision floating-point conversions */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float64_is_nan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(float64_is_nan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs (uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}

/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* Double-precision floating-point comparisons */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(op1, op2);
}

uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(op1, op2);
}

uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(op1, op2);
}

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

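/* The includes above instantiate softmmu_template.h once per SHIFT value,
 * i.e. once per access size (1 << SHIFT bytes), generating the softmmu
 * load/store helpers for 1-, 2-, 4- and 8-byte accesses. */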
/* Try to fill the TLB and raise an exception on error. If retaddr is
   NULL, the function was called from C code (i.e. not from generated
   code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Segment registers load and store */
target_ulong helper_load_sr (target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64)
        return ppc_load_sr(env, sr_num);
#endif
    return env->sr[sr_num];
}

void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}

/* SLB management */
#if defined(TARGET_PPC64)
target_ulong helper_load_slb (target_ulong slb_nr)
{
    return ppc_load_slb(env, slb_nr);
}

void helper_store_slb (target_ulong rb, target_ulong rs)
{
    ppc_store_slb(env, rb, rs);
}

void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}

void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}

#endif /* defined(TARGET_PPC64) */

/* TLB management */
void helper_tlbia (void)
{
    ppc_tlb_invalidate_all(env);
}

void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}

/* Software-driven TLB management */
/* PowerPC 602/603 software TLB load instruction helpers */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}

void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}

/* PowerPC 74xx software TLB load instruction helpers */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}

void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}

static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}

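/* BookE encodes page sizes as powers of four: field value n maps to
 * 1 KiB * 4^n (i.e. 1024 << 2n), so n == 0 is 1 KiB and n == 9 is 256 MiB.
 * The inverse mapping below returns -1 for sizes with no encoding. */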
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}

/* Helpers for 4xx TLB management */
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00

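/* As the masks above imply, a 4xx TLBHI word carries the EPN plus a valid
 * bit, an endianness bit and a 3-bit size field at bit 7, while TLBLO
 * carries the RPN plus execute/write permission bits and an 8-bit
 * attribute field. */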
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}

target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}

void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "is not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported for now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
}

void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}

target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}

/* PowerPC 440 TLB management */
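/* A 440 TLB entry is programmed through three words, decoded below:
 * word 0 holds the EPN, size and valid bit, word 1 the RPN, and word 2 the
 * attributes plus two sets of read/write/execute permission bits (the bits
 * shifted left by 4 apparently tracking the second privilege level). */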
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}

target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}

target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

#endif /* !CONFIG_USER_ONLY */